2020-05-08 23:37:21 +00:00
|
|
|
#![cfg(not(debug_assertions))]
|
2019-11-26 23:54:46 +00:00
|
|
|
|
2020-05-06 11:42:56 +00:00
|
|
|
use beacon_chain::attestation_verification::Error as AttnError;
|
2021-09-22 00:37:28 +00:00
|
|
|
use beacon_chain::builder::BeaconChainBuilder;
|
2019-11-26 23:54:46 +00:00
|
|
|
use beacon_chain::test_utils::{
|
2021-10-06 00:46:07 +00:00
|
|
|
test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType,
|
|
|
|
HARNESS_SLOT_TIME,
|
2019-11-26 23:54:46 +00:00
|
|
|
};
|
2021-09-22 00:37:28 +00:00
|
|
|
use beacon_chain::{
|
|
|
|
historical_blocks::HistoricalBlockError, migrate::MigratorConfig, BeaconChain,
|
|
|
|
BeaconChainError, BeaconChainTypes, BeaconSnapshot, ChainConfig, ServerSentEventHandler,
|
|
|
|
WhenSlotSkipped,
|
|
|
|
};
|
2020-10-19 05:58:39 +00:00
|
|
|
use lazy_static::lazy_static;
|
2021-10-06 00:46:07 +00:00
|
|
|
use logging::test_logger;
|
2020-08-26 09:24:55 +00:00
|
|
|
use maplit::hashset;
|
2019-11-26 23:54:46 +00:00
|
|
|
use rand::Rng;
|
2020-04-20 09:59:56 +00:00
|
|
|
use std::collections::HashMap;
|
|
|
|
use std::collections::HashSet;
|
2020-08-26 09:24:55 +00:00
|
|
|
use std::convert::TryInto;
|
2019-11-26 23:54:46 +00:00
|
|
|
use std::sync::Arc;
|
2020-03-04 05:48:35 +00:00
|
|
|
use store::{
|
|
|
|
iter::{BlockRootsIterator, StateRootsIterator},
|
2020-06-16 01:34:04 +00:00
|
|
|
HotColdDB, LevelDB, StoreConfig,
|
2020-03-04 05:48:35 +00:00
|
|
|
};
|
2019-11-26 23:54:46 +00:00
|
|
|
use tempfile::{tempdir, TempDir};
|
|
|
|
use tree_hash::TreeHash;
|
|
|
|
use types::test_utils::{SeedableRng, XorShiftRng};
|
|
|
|
use types::*;
|
|
|
|
|
|
|
|
/// Validator count used by most tests. Should ideally be divisible by 3.
pub const LOW_VALIDATOR_COUNT: usize = 24;

/// Larger validator count; the cached keypair set is sized to this.
pub const HIGH_VALIDATOR_COUNT: usize = 64;
|
2019-11-26 23:54:46 +00:00
|
|
|
|
|
|
|
lazy_static! {
    /// A cached set of deterministic keys, generated lazily once and shared by all tests.
    static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(HIGH_VALIDATOR_COUNT);
}
|
|
|
|
|
|
|
|
type E = MinimalEthSpec;
|
2020-10-19 05:58:39 +00:00
|
|
|
type TestHarness = BeaconChainHarness<DiskHarnessType<E>>;
|
2019-11-26 23:54:46 +00:00
|
|
|
|
2020-06-16 01:34:04 +00:00
|
|
|
fn get_store(db_path: &TempDir) -> Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>> {
|
2021-08-30 06:41:31 +00:00
|
|
|
get_store_with_spec(db_path, test_spec::<E>())
|
|
|
|
}
|
|
|
|
|
|
|
|
fn get_store_with_spec(
|
|
|
|
db_path: &TempDir,
|
|
|
|
spec: ChainSpec,
|
|
|
|
) -> Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>> {
|
2019-11-26 23:54:46 +00:00
|
|
|
let hot_path = db_path.path().join("hot_db");
|
|
|
|
let cold_path = db_path.path().join("cold_db");
|
2020-02-10 00:30:21 +00:00
|
|
|
let config = StoreConfig::default();
|
2020-10-19 05:58:39 +00:00
|
|
|
let log = test_logger();
|
2020-07-02 23:47:31 +00:00
|
|
|
|
2021-03-04 01:25:12 +00:00
|
|
|
HotColdDB::open(&hot_path, &cold_path, |_, _, _| Ok(()), config, spec, log)
|
|
|
|
.expect("disk store should initialize")
|
2019-11-26 23:54:46 +00:00
|
|
|
}
|
|
|
|
|
2020-06-16 01:34:04 +00:00
|
|
|
fn get_harness(
|
|
|
|
store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>,
|
|
|
|
validator_count: usize,
|
|
|
|
) -> TestHarness {
|
Prepare for public testnet (#628)
* Update to spec v0.9.0
* Update to v0.9.1
* Bump spec tags for v0.9.1
* Formatting, fix CI failures
* Resolve accidental KeyPair merge conflict
* Document new BeaconState functions
* Add `validator` changes from `validator-to-rest`
* Add initial (failing) REST api tests
* Fix signature parsing
* Add more tests
* Refactor http router
* Add working tests for publish beacon block
* Add validator duties tests
* Move account_manager under `lighthouse` binary
* Unify logfile handling in `environment` crate.
* Fix incorrect cache drops in `advance_caches`
* Update fork choice for v0.9.1
* Add `deposit_contract` crate
* Add progress on validator onboarding
* Add unfinished attesation code
* Update account manager CLI
* Write eth1 data file as hex string
* Integrate ValidatorDirectory with validator_client
* Move ValidatorDirectory into validator_client
* Clean up some FIXMEs
* Add beacon_chain_sim
* Fix a few docs/logs
* Expand `beacon_chain_sim`
* Fix spec for `beacon_chain_sim
* More testing for api
* Start work on attestation endpoint
* Reject empty attestations
* Allow attestations to genesis block
* Add working tests for `rest_api` validator endpoint
* Remove grpc from beacon_node
* Start heavy refactor of validator client
- Block production is working
* Prune old validator client files
* Start works on attestation service
* Add attestation service to validator client
* Use full pubkey for validator directories
* Add validator duties post endpoint
* Use par_iter for keypair generation
* Use bulk duties request in validator client
* Add version http endpoint tests
* Add interop keys and startup wait
* Ensure a prompt exit
* Add duties pruning
* Fix compile error in beacon node tests
* Add github workflow
* Modify rust.yaml
* Modify gitlab actions
* Add to CI file
* Add sudo to CI npm install
* Move cargo fmt to own job in tests
* Fix cargo fmt in CI
* Add rustup update before cargo fmt
* Change name of CI job
* Make other CI jobs require cargo fmt
* Add CI badge
* Remove gitlab and travis files
* Add different http timeout for debug
* Update docker file, use makefile in CI
* Use make in the dockerfile, skip the test
* Use the makefile for debug GI test
* Update book
* Tidy grpc and misc things
* Apply discv5 fixes
* Address other minor issues
* Fix warnings
* Attempt fix for addr parsing
* Tidy validator config, CLIs
* Tidy comments
* Tidy signing, reduce ForkService duplication
* Fail if skipping too many slots
* Set default recent genesis time to 0
* Add custom http timeout to validator
* Fix compile bug in node_test_rig
* Remove old bootstrap flag from val CLI
* Update docs
* Tidy val client
* Change val client log levels
* Add comments, more validity checks
* Fix compile error, add comments
* Undo changes to eth2-libp2p/src
* Reduce duplication of keypair generation
* Add more logging for validator duties
* Fix beacon_chain_sim, nitpicks
* Fix compile error, minor nits
* Update to use v0.9.2 version of deposit contract
* Add efforts to automate eth1 testnet deployment
* Fix lcli testnet deployer
* Modify bn CLI to parse eth2_testnet_dir
* Progress with account_manager deposit tools
* Make account manager submit deposits
* Add password option for submitting deposits
* Allow custom deposit amount
* Add long names to lcli clap
* Add password option to lcli deploy command
* Fix minor bugs whilst testing
* Address Michael's comments
* Add refund-deposit-contract to lcli
* Use time instead of skip count for denying long skips
* Improve logging for eth1
* Fix bug with validator services exiting on error
* Drop the block cache after genesis
* Modify eth1 testnet config
* Improve eth1 logging
* Make validator wait until genesis time
* Fix bug in eth1 voting
* Add more logging to eth1 voting
* Handle errors in eth1 http module
* Set SECONDS_PER_DAY to sensible minimum
* Shorten delay before testnet start
* Ensure eth1 block is produced without any votes
* Improve eth1 logging
* Fix broken tests in eth1
* Tidy code in rest_api
* Fix failing test in deposit_contract
* Make CLI args more consistent
* Change validator/duties endpoint
* Add time-based skip slot limiting
* Add new error type missed in previous commit
* Add log when waiting for genesis
* Refactor beacon node CLI
* Remove unused dep
* Add lcli eth1-genesis command
* Fix bug in master merge
* Apply clippy lints to beacon node
* Add support for YamlConfig in Eth2TestnetDir
* Upgrade tesnet deposit contract version
* Remove unnecessary logging and correct formatting
* Add a hardcoded eth2 testnet config
* Ensure http server flag works. Overwrite configs with flags.
* Ensure boot nodes are loaded from testnet dir
* Fix account manager CLI bugs
* Fix bugs with beacon node cli
* Allow testnet dir without boot nodes
* Write genesis state as SSZ
* Remove ---/n from the start of testnet_dir files
* Set default libp2p address
* Tidy account manager CLI, add logging
* Add check to see if testnet dir exists
* Apply reviewers suggestions
* Add HeadTracker struct
* Add fork choice persistence
* Shorten slot time for simulator
* Add the /beacon/heads API endpoint
* Update hardcoded testnet
* Add tests for BeaconChain persistence + fix bugs
* Extend BeaconChain persistence testing
* Ensure chain is finalized b4 persistence tests
* Ensure boot_enr.yaml is include in binary
* Refactor beacon_chain_sim
* Move files about in beacon sim
* Update beacon_chain_sim
* Fix bug with deposit inclusion
* Increase log in genesis service, fix todo
* Tidy sim, fix broken rest_api tests
* Fix more broken tests
* Update testnet
* Fix broken rest api test
* Tidy account manager CLI
* Use tempdir for account manager
* Stop hardcoded testnet dir from creating dir
* Rename Eth2TestnetDir to Eth2TestnetConfig
* Change hardcoded -> hard_coded
* Tidy account manager
* Add log to account manager
* Tidy, ensure head tracker is loaded from disk
* Tidy beacon chain builder
* Tidy eth1_chain
* Adds log support for simulator
* Revert "Adds log support for simulator"
This reverts commit ec77c66a052350f551db145cf20f213823428dd3.
* Adds log support for simulator
* Tidy after self-review
* Change default log level
* Address Michael's delicious PR comments
* Fix off-by-one in tests
2019-12-03 04:28:57 +00:00
|
|
|
let harness = BeaconChainHarness::new_with_disk_store(
|
2019-11-26 23:54:46 +00:00
|
|
|
MinimalEthSpec,
|
2021-07-09 06:15:32 +00:00
|
|
|
None,
|
2019-11-26 23:54:46 +00:00
|
|
|
store,
|
|
|
|
KEYPAIRS[0..validator_count].to_vec(),
|
|
|
|
);
|
|
|
|
harness.advance_slot();
|
|
|
|
harness
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
fn full_participation_no_skips() {
    let block_count = E::slots_per_epoch() * 5;
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);

    // Build five epochs of blocks on the canonical head with every validator attesting.
    harness.extend_chain(
        block_count as usize,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );

    // Sanity-check finalization, the hot/cold split slot, the chain dump
    // (+1 accounts for the genesis block) and the store iterators.
    check_finalization(&harness, block_count);
    check_split_slot(&harness, store);
    check_chain_dump(&harness, block_count + 1);
    check_iterators(&harness);
}
|
|
|
|
|
|
|
|
#[test]
fn randomised_skips() {
    let num_slots = E::slots_per_epoch() * 5;
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);
    let rng = &mut XorShiftRng::from_seed([42; 16]);

    let mut blocks_built = 0;
    let mut head_slot = 0;

    // At each slot, produce a block with 80% probability; otherwise leave the
    // slot empty (a skip). Every iteration advances the clock by one slot.
    for slot in 1..=num_slots {
        if rng.gen_bool(0.8) {
            harness.extend_chain(
                1,
                BlockStrategy::ForkCanonicalChainAt {
                    previous_slot: Slot::new(head_slot),
                    first_slot: Slot::new(slot),
                },
                AttestationStrategy::AllValidators,
            );
            blocks_built += 1;
            head_slot = slot;
        }
        harness.advance_slot();
    }

    let state = &harness.chain.head().expect("should get head").beacon_state;

    assert_eq!(
        state.slot(),
        num_slots,
        "head should be at the current slot"
    );

    check_split_slot(&harness, store);
    check_chain_dump(&harness, blocks_built + 1);
    check_iterators(&harness);
}
|
|
|
|
|
|
|
|
#[test]
fn long_skip() {
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);

    // Number of blocks to create in the first run, intentionally not falling on an epoch
    // boundary in order to check that the DB hot -> cold migration is capable of reaching
    // back across the skip distance, and correctly migrating those extra non-finalized states.
    let blocks_before_skip = E::slots_per_epoch() * 5 + E::slots_per_epoch() / 2;
    let skipped = E::slots_per_historical_root() as u64 * 8;
    // Create the minimum ~2.5 epochs of extra blocks required to re-finalize the chain.
    // Having this set lower ensures that we start justifying and finalizing quickly after a skip.
    let blocks_after_skip = 2 * E::slots_per_epoch() + E::slots_per_epoch() / 2;

    // 1. Build and finalize the initial portion of the chain.
    harness.extend_chain(
        blocks_before_skip as usize,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );

    check_finalization(&harness, blocks_before_skip);

    // 2. Skip a long run of slots without producing any blocks.
    for _ in 0..skipped {
        harness.advance_slot();
    }

    // 3. Produce more blocks, establishing a new finalized epoch.
    harness.extend_chain(
        blocks_after_skip as usize,
        BlockStrategy::ForkCanonicalChainAt {
            previous_slot: Slot::new(blocks_before_skip),
            first_slot: Slot::new(blocks_before_skip + skipped as u64 + 1),
        },
        AttestationStrategy::AllValidators,
    );

    check_finalization(&harness, blocks_before_skip + skipped + blocks_after_skip);
    check_split_slot(&harness, store);
    check_chain_dump(&harness, blocks_before_skip + blocks_after_skip + 1);
    check_iterators(&harness);
}
|
|
|
|
|
|
|
|
/// Go forward to the point where the genesis randao value is no longer part of the vector.
///
/// This implicitly checks that:
/// 1. The chunked vector scheme doesn't attempt to store an incorrect genesis value
/// 2. We correctly load the genesis value for all required slots
///
/// NOTE: this test takes about a minute to run
#[test]
fn randao_genesis_storage() {
    let validator_count = 8;
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(store.clone(), validator_count);

    // One slot short of the full historical vector, so the genesis mix is still retained.
    let num_slots = E::slots_per_epoch() * (E::epochs_per_historical_vector() - 1) as u64;

    // Check we have a non-trivial genesis value.
    let genesis_value = *harness
        .chain
        .head()
        .expect("should get head")
        .beacon_state
        .get_randao_mix(Epoch::new(0))
        .expect("randao mix ok");
    assert!(!genesis_value.is_zero());

    harness.extend_chain(
        num_slots as usize - 1,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );

    // Check that the genesis value is still present.
    // NOTE: `.any` replaces the clippy-flagged `.find(..).is_some()` idiom.
    assert!(harness
        .chain
        .head()
        .expect("should get head")
        .beacon_state
        .randao_mixes()
        .iter()
        .any(|x| *x == genesis_value));

    // Then upon adding one more block, it isn't.
    harness.advance_slot();
    harness.extend_chain(
        1,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );
    assert!(!harness
        .chain
        .head()
        .expect("should get head")
        .beacon_state
        .randao_mixes()
        .iter()
        .any(|x| *x == genesis_value));

    check_finalization(&harness, num_slots);
    check_split_slot(&harness, store);
    check_chain_dump(&harness, num_slots + 1);
    check_iterators(&harness);
}
|
|
|
|
|
|
|
|
// Check that closing and reopening a freezer DB restores the split slot to its correct value.
#[test]
fn split_slot_restore() {
    let db_path = tempdir().unwrap();

    // Build four epochs of chain, then drop the store/harness (closing the DB)
    // while remembering the split slot that was reached.
    let split_slot = {
        let store = get_store(&db_path);
        let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);

        harness.extend_chain(
            (4 * E::slots_per_epoch()) as usize,
            BlockStrategy::OnCanonicalHead,
            AttestationStrategy::AllValidators,
        );

        store.get_split_slot()
    };
    assert_ne!(split_slot, Slot::new(0));

    // Re-open the store and check that the split slot was persisted.
    let store = get_store(&db_path);
    assert_eq!(store.get_split_slot(), split_slot);
}
|
|
|
|
|
2020-01-08 02:58:01 +00:00
|
|
|
// Check attestation processing and `load_epoch_boundary_state` in the presence of a split DB.
// This is a bit of a monster test in that it tests lots of different things, but until they're
// tested elsewhere, this is as good a place as any.
#[test]
fn epoch_boundary_state_attestation_processing() {
    let num_blocks_produced = E::slots_per_epoch() * 5;
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);

    // Validators 0 and 1 attest "late": their attestations are collected but not
    // applied until after the chain has advanced well past their slots.
    let late_validators = vec![0, 1];
    let timely_validators = (2..LOW_VALIDATOR_COUNT).collect::<Vec<_>>();

    let mut late_attestations = vec![];

    for _ in 0..num_blocks_produced {
        harness.extend_chain(
            1,
            BlockStrategy::OnCanonicalHead,
            AttestationStrategy::SomeValidators(timely_validators.clone()),
        );

        let head = harness.chain.head().expect("head ok");
        late_attestations.extend(harness.get_unaggregated_attestations(
            &AttestationStrategy::SomeValidators(late_validators.clone()),
            &head.beacon_state,
            head.beacon_state_root(),
            head.beacon_block_root,
            head.beacon_block.slot(),
        ));

        harness.advance_slot();
    }

    check_finalization(&harness, num_blocks_produced);
    check_split_slot(&harness, store.clone());
    check_chain_dump(&harness, num_blocks_produced + 1);
    check_iterators(&harness);

    // Guard that the loop below really exercised the pre-finalization rejection path.
    let mut checked_pre_fin = false;

    for (attestation, subnet_id) in late_attestations.into_iter().flatten() {
        // load_epoch_boundary_state is idempotent!
        let block_root = attestation.data.beacon_block_root;
        let block = store.get_block(&block_root).unwrap().expect("block exists");
        let epoch_boundary_state = store
            .load_epoch_boundary_state(&block.state_root())
            .expect("no error")
            .expect("epoch boundary state exists");
        let ebs_of_ebs = store
            .load_epoch_boundary_state(&epoch_boundary_state.canonical_root())
            .expect("no error")
            .expect("ebs of ebs exists");
        assert_eq!(epoch_boundary_state, ebs_of_ebs);

        // If the attestation is pre-finalization it should be rejected.
        let finalized_epoch = harness
            .chain
            .head_info()
            .expect("should get head")
            .finalized_checkpoint
            .epoch;

        let res = harness
            .chain
            .verify_unaggregated_attestation_for_gossip(&attestation, Some(subnet_id));

        let current_slot = harness.chain.slot().expect("should get slot");
        let expected_attestation_slot = attestation.data.slot;
        // Extra -1 to handle gossip clock disparity.
        let expected_earliest_permissible_slot = current_slot - E::slots_per_epoch() - 1;

        if expected_attestation_slot <= finalized_epoch.start_slot(E::slots_per_epoch())
            || expected_attestation_slot < expected_earliest_permissible_slot
        {
            checked_pre_fin = true;
            assert!(matches!(
                res.err().unwrap(),
                AttnError::PastSlot {
                    attestation_slot,
                    earliest_permissible_slot,
                }
                if attestation_slot == expected_attestation_slot && earliest_permissible_slot == expected_earliest_permissible_slot
            ));
        } else {
            // Fixed typo in the expect message: "attetation" -> "attestation".
            res.expect("should have verified attestation");
        }
    }
    assert!(checked_pre_fin);
}
|
|
|
|
|
2020-03-04 05:48:35 +00:00
|
|
|
#[test]
fn delete_blocks_and_states() {
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let validators_keypairs =
        types::test_utils::generate_deterministic_keypairs(LOW_VALIDATOR_COUNT);
    let harness = BeaconChainHarness::new_with_disk_store(
        MinimalEthSpec,
        None,
        store.clone(),
        validators_keypairs,
    );

    let unforked_blocks: u64 = 4 * E::slots_per_epoch();

    // Finalize an initial portion of the chain.
    let initial_slots: Vec<Slot> = (1..=unforked_blocks).map(Slot::new).collect();
    let (state, state_root) = harness.get_current_state_and_root();
    let all_validators = harness.get_all_validators();
    harness.add_attested_blocks_at_slots(state, state_root, &initial_slots, &all_validators);

    // Create a fork post-finalization: an honest 2/3 majority and a faulty 1/3 minority.
    let honest_count = (LOW_VALIDATOR_COUNT / 3) * 2;
    let honest_validators: Vec<usize> = (0..honest_count).collect();
    let faulty_validators: Vec<usize> = (honest_count..LOW_VALIDATOR_COUNT).collect();

    let fork_blocks = 2 * E::slots_per_epoch();

    let fork_start: u64 = harness.get_current_slot().as_u64() + 1;

    // The faulty fork starts one slot later so the two heads diverge.
    let fork1_slots: Vec<Slot> = (fork_start..(fork_start + fork_blocks))
        .map(Slot::new)
        .collect();
    let fork2_slots: Vec<Slot> = (fork_start + 1..(fork_start + 1 + fork_blocks))
        .map(Slot::new)
        .collect();

    let fork1_state = harness.get_current_state();
    let fork2_state = fork1_state.clone();
    let results = harness.add_blocks_on_multiple_chains(vec![
        (fork1_state, fork1_slots, honest_validators),
        (fork2_state, fork2_slots, faulty_validators),
    ]);

    let honest_head = results[0].2;
    let faulty_head = results[1].2;

    assert_ne!(honest_head, faulty_head, "forks should be distinct");
    let head_info = harness.chain.head_info().expect("should get head");
    assert_eq!(head_info.slot, unforked_blocks + fork_blocks);

    assert_eq!(
        head_info.block_root,
        honest_head.into(),
        "the honest chain should be the canonical chain",
    );

    let faulty_head_block = store
        .get_block(&faulty_head.into())
        .expect("no errors")
        .expect("faulty head block exists");

    let faulty_head_state = store
        .get_state(
            &faulty_head_block.state_root(),
            Some(faulty_head_block.slot()),
        )
        .expect("no db error")
        .expect("faulty head state exists");

    // Delete the faulty fork's states; attempting to load them afterwards
    // should find them unavailable.
    for (state_root, slot) in
        StateRootsIterator::new(store.clone(), &faulty_head_state).map(Result::unwrap)
    {
        if slot <= unforked_blocks {
            break;
        }
        store.delete_state(&state_root, slot).unwrap();
        assert_eq!(store.get_state(&state_root, Some(slot)).unwrap(), None);
    }

    // Double-deleting should also be OK (deleting non-existent things is fine).
    for (state_root, slot) in
        StateRootsIterator::new(store.clone(), &faulty_head_state).map(Result::unwrap)
    {
        if slot <= unforked_blocks {
            break;
        }
        store.delete_state(&state_root, slot).unwrap();
    }

    // Deleting the blocks from the fork should remove them completely.
    for (block_root, slot) in
        BlockRootsIterator::new(store.clone(), &faulty_head_state).map(Result::unwrap)
    {
        if slot <= unforked_blocks + 1 {
            break;
        }
        store.delete_block(&block_root).unwrap();
        assert_eq!(store.get_block(&block_root).unwrap(), None);
    }

    // Deleting frozen states should do nothing.
    let split_slot = store.get_split_slot();
    let finalized_states = harness
        .chain
        .forwards_iter_state_roots(Slot::new(0))
        .expect("should get iter")
        .map(Result::unwrap);

    for (state_root, slot) in finalized_states {
        if slot < split_slot {
            store.delete_state(&state_root, slot).unwrap();
        }
    }

    // After all that, the chain dump should still be OK.
    check_chain_dump(&harness, unforked_blocks + fork_blocks + 1);
}
|
|
|
|
|
2020-04-20 02:34:37 +00:00
|
|
|
// Check that we never produce invalid blocks when there is deep forking that changes the shuffling.
// See https://github.com/sigp/lighthouse/issues/845
//
// Builds `initial_blocks` on a common chain, then grows two forks from the same state:
// fork 1 starting at the next slot with `num_fork1_validators` attesters, and fork 2 starting
// one slot later (so the forks are distinct) with the remaining validators.
//
// Returns the temp DB dir (kept alive so the store isn't deleted), the harness, and the head
// block roots of fork 1 and fork 2 respectively.
fn multi_epoch_fork_valid_blocks_test(
    initial_blocks: usize,
    num_fork1_blocks_: usize,
    num_fork2_blocks_: usize,
    num_fork1_validators: usize,
) -> (TempDir, TestHarness, Hash256, Hash256) {
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let validators_keypairs =
        types::test_utils::generate_deterministic_keypairs(LOW_VALIDATOR_COUNT);
    let harness =
        BeaconChainHarness::new_with_disk_store(MinimalEthSpec, None, store, validators_keypairs);

    // Block counts are provided as `usize` for caller convenience but used as `u64` slot math.
    let num_fork1_blocks: u64 = num_fork1_blocks_.try_into().unwrap();
    let num_fork2_blocks: u64 = num_fork2_blocks_.try_into().unwrap();

    // Create the initial portion of the chain
    if initial_blocks > 0 {
        let initial_slots: Vec<Slot> = (1..=initial_blocks).map(Into::into).collect();
        let (state, state_root) = harness.get_current_state_and_root();
        let all_validators = harness.get_all_validators();
        harness.add_attested_blocks_at_slots(state, state_root, &initial_slots, &all_validators);
    }

    // Partition the validator set: the first `num_fork1_validators` attest to fork 1,
    // everyone else attests to fork 2.
    assert!(num_fork1_validators <= LOW_VALIDATOR_COUNT);
    let fork1_validators: Vec<usize> = (0..num_fork1_validators).collect();
    let fork2_validators: Vec<usize> = (num_fork1_validators..LOW_VALIDATOR_COUNT).collect();

    // Both forks grow from a clone of the same pre-fork state.
    let fork1_state = harness.get_current_state();
    let fork2_state = fork1_state.clone();

    // Fork 2 starts one slot later than fork 1 so the two chains cannot share blocks.
    let slot_u64: u64 = harness.get_current_slot().as_u64() + 1;
    let fork1_slots: Vec<Slot> = (slot_u64..(slot_u64 + num_fork1_blocks))
        .map(Into::into)
        .collect();
    let fork2_slots: Vec<Slot> = (slot_u64 + 1..(slot_u64 + 1 + num_fork2_blocks))
        .map(Into::into)
        .collect();

    let results = harness.add_blocks_on_multiple_chains(vec![
        (fork1_state, fork1_slots, fork1_validators),
        (fork2_state, fork2_slots, fork2_validators),
    ]);

    // `.2` is the head block hash of each produced chain.
    let head1 = results[0].2;
    let head2 = results[1].2;

    (db_path, harness, head1.into(), head2.into())
}
|
|
|
|
|
|
|
|
// This is the minimal test of block production with different shufflings.
// The common chain stops two slots short of the first epoch boundary, then both forks
// extend a little past one epoch so their shufflings diverge.
#[test]
fn block_production_different_shuffling_early() {
    let slots_per_epoch = E::slots_per_epoch() as usize;
    let initial = slots_per_epoch - 2;
    let fork_len = slots_per_epoch + 3;
    multi_epoch_fork_valid_blocks_test(initial, fork_len, fork_len, LOW_VALIDATOR_COUNT / 2);
}
|
|
|
|
|
|
|
|
// Longer variant: two epochs of common chain (minus two slots), then three epochs on
// each fork, exercising block production across several shuffling changes.
#[test]
fn block_production_different_shuffling_long() {
    let slots_per_epoch = E::slots_per_epoch() as usize;
    let initial = 2 * slots_per_epoch - 2;
    let fork_len = 3 * slots_per_epoch;
    multi_epoch_fork_valid_blocks_test(initial, fork_len, fork_len, LOW_VALIDATOR_COUNT / 2);
}
|
|
|
|
|
|
|
|
// Check that the op pool safely includes multiple attestations per block when necessary.
// This checks the correctness of the shuffling compatibility memoization.
#[test]
fn multiple_attestations_per_block() {
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    // High validator count so each slot has more than one committee.
    let harness = get_harness(store, HIGH_VALIDATOR_COUNT);

    harness.extend_chain(
        E::slots_per_epoch() as usize * 3,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );

    let head = harness.chain.head().unwrap();
    let committees_per_slot = head
        .beacon_state
        .get_committee_count_at_slot(head.beacon_state.slot())
        .unwrap();
    // The test is only meaningful if blocks must pack attestations from >1 committee.
    assert!(committees_per_slot > 1);

    for snapshot in harness.chain.chain_dump().unwrap() {
        // Capture the slot before `deconstruct()` consumes the block below.
        let slot = snapshot.beacon_block.slot();
        // Every block (except genesis and slot 1, which have no prior attestable slot)
        // should contain one aggregate per committee of the attested slot.
        assert_eq!(
            snapshot
                .beacon_block
                .deconstruct()
                .0
                .body()
                .attestations()
                .len() as u64,
            if slot <= 1 { 0 } else { committees_per_slot }
        );
    }
}
|
|
|
|
|
|
|
|
// On a skip-free linear chain, the head state's shuffling should be compatible with
// every ancestor block for both the current and previous epochs (back to the pivots).
#[test]
fn shuffling_compatible_linear_chain() {
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);

    // Build four epochs of blocks with no skipped slots.
    let chain_len = 4 * E::slots_per_epoch() as usize;
    let head_block_root = harness.extend_chain(
        chain_len,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );
    let head_state = get_state_for_block(&harness, head_block_root);

    check_shuffling_compatible(
        &harness,
        &head_state,
        head_block_root,
        true,
        true,
        None,
        None,
    );
}
|
|
|
|
|
|
|
|
// Check shuffling compatibility when the pivot block at the epoch boundary is missing
// (the relevant slots are skipped), so lookup must fall back to an earlier ancestor.
#[test]
fn shuffling_compatible_missing_pivot_block() {
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);

    let slots_per_epoch = E::slots_per_epoch();

    // Stop two slots short of the first epoch boundary...
    harness.extend_chain(
        slots_per_epoch as usize - 2,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );
    // ...then skip two slots so the block at the end of the first epoch is absent.
    harness.advance_slot();
    harness.advance_slot();
    let head_block_root = harness.extend_chain(
        2 * slots_per_epoch as usize,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );
    let head_state = get_state_for_block(&harness, head_block_root);

    // The effective cutoff moves back to the last block before the skipped slots.
    check_shuffling_compatible(
        &harness,
        &head_state,
        head_block_root,
        true,
        true,
        Some(slots_per_epoch - 2),
        Some(slots_per_epoch - 2),
    );
}
|
|
|
|
|
|
|
|
// Two long forks diverging over multiple epochs: each head state's shuffling is
// compatible only with blocks on its own chain, never with the other fork's blocks.
#[test]
fn shuffling_compatible_simple_fork() {
    let slots_per_epoch = E::slots_per_epoch() as usize;
    let (db_path, harness, head1, head2) = multi_epoch_fork_valid_blocks_test(
        2 * slots_per_epoch,
        3 * slots_per_epoch,
        3 * slots_per_epoch,
        LOW_VALIDATOR_COUNT / 2,
    );

    let head1_state = get_state_for_block(&harness, head1);
    let head2_state = get_state_for_block(&harness, head2);

    // (state, target head, current-epoch expectation, previous-epoch expectation)
    let cases = [
        (&head1_state, head1, true, true),
        (&head1_state, head2, false, false),
        (&head2_state, head1, false, false),
        (&head2_state, head2, true, true),
    ];
    for &(state, head, current_ok, previous_ok) in &cases {
        check_shuffling_compatible(&harness, state, head, current_ok, previous_ok, None, None);
    }

    drop(db_path);
}
|
|
|
|
|
|
|
|
// Short fork straddling an epoch boundary: compatibility within the *previous* epoch can
// still hold across the forks because they only diverge late.
#[test]
fn shuffling_compatible_short_fork() {
    let slots_per_epoch = E::slots_per_epoch() as usize;
    let (db_path, harness, head1, head2) = multi_epoch_fork_valid_blocks_test(
        2 * slots_per_epoch - 2,
        slots_per_epoch + 2,
        slots_per_epoch + 2,
        LOW_VALIDATOR_COUNT / 2,
    );

    let head1_state = get_state_for_block(&harness, head1);
    let head2_state = get_state_for_block(&harness, head2);

    check_shuffling_compatible(&harness, &head1_state, head1, true, true, None, None);
    // Cross-fork: current-epoch shuffling differs, but previous-epoch shuffling is shared.
    check_shuffling_compatible(&harness, &head1_state, head2, false, true, None, None);
    // NOTE: don't check this case, as block 14 from the first chain appears valid on the second
    // chain due to it matching the second chain's block 15.
    // check_shuffling_compatible(&harness, &head2_state, head1, false, true, None, None);
    check_shuffling_compatible(
        &harness,
        &head2_state,
        head2,
        true,
        true,
        // Required because of the skipped slot.
        Some(2 * E::slots_per_epoch() - 2),
        None,
    );

    drop(db_path);
}
|
|
|
|
|
|
|
|
/// Load the post-state of the block with the given root from the chain's store.
///
/// Panics if the block or its state is not present in the database.
fn get_state_for_block(harness: &TestHarness, block_root: Hash256) -> BeaconState<E> {
    let block = harness.chain.get_block(&block_root).unwrap().unwrap();
    let state_root = block.state_root();
    let slot = block.slot();
    harness
        .chain
        .get_state(&state_root, Some(slot))
        .unwrap()
        .unwrap()
}
|
|
|
|
|
|
|
|
/// Check the invariants that apply to `shuffling_is_compatible`.
///
/// Walks block roots backwards from `head_block_root` and, for every ancestor, asserts:
/// - current-epoch compatibility is `current_epoch_valid` for blocks at/after the pivot slot
///   (overridable via `current_epoch_cutoff_slot`, e.g. when the pivot block is skipped);
/// - likewise for the previous epoch;
/// - targeting the next epoch or any epoch two-or-more before the current one is never
///   compatible.
fn check_shuffling_compatible(
    harness: &TestHarness,
    head_state: &BeaconState<E>,
    head_block_root: Hash256,
    current_epoch_valid: bool,
    previous_epoch_valid: bool,
    current_epoch_cutoff_slot: Option<u64>,
    previous_epoch_cutoff_slot: Option<u64>,
) {
    // The pivot is the last slot of the epoch `min_seed_lookahead + 1` epochs before the target.
    let shuffling_lookahead = harness.chain.spec.min_seed_lookahead.as_u64() + 1;
    let current_pivot_slot =
        (head_state.current_epoch() - shuffling_lookahead).end_slot(E::slots_per_epoch());
    let previous_pivot_slot =
        (head_state.previous_epoch() - shuffling_lookahead).end_slot(E::slots_per_epoch());

    for maybe_tuple in harness
        .chain
        .rev_iter_block_roots_from(head_block_root)
        .unwrap()
    {
        let (block_root, slot) = maybe_tuple.unwrap();
        // Shuffling is compatible targeting the current epoch,
        // if slot is greater than or equal to the current epoch pivot block.
        assert_eq!(
            harness.chain.shuffling_is_compatible(
                &block_root,
                head_state.current_epoch(),
                &head_state
            ),
            current_epoch_valid
                && slot >= current_epoch_cutoff_slot.unwrap_or(current_pivot_slot.as_u64())
        );
        // Similarly for the previous epoch
        assert_eq!(
            harness.chain.shuffling_is_compatible(
                &block_root,
                head_state.previous_epoch(),
                &head_state
            ),
            previous_epoch_valid
                && slot >= previous_epoch_cutoff_slot.unwrap_or(previous_pivot_slot.as_u64())
        );
        // Targeting the next epoch should always return false
        assert_eq!(
            harness.chain.shuffling_is_compatible(
                &block_root,
                head_state.current_epoch() + 1,
                &head_state
            ),
            false
        );
        // Targeting two epochs before the current epoch should also always return false
        if head_state.current_epoch() >= 2 {
            assert_eq!(
                harness.chain.shuffling_is_compatible(
                    &block_root,
                    head_state.current_epoch() - 2,
                    &head_state
                ),
                false
            );
        }
    }
}
|
|
|
|
|
2020-04-20 09:59:56 +00:00
|
|
|
// Ensure blocks from abandoned forks are pruned from the Hot DB
//
// Scenario: honest validators build the canonical chain through epoch 1; adversarial
// validators build a stray fork during epoch 1. Once the honest chain finalizes past the
// fork, every stray block and state must be gone from both the hot and cold databases.
#[test]
fn prunes_abandoned_fork_between_two_finalized_checkpoints() {
    // Plain literals for consistency with the other pruning tests (was `16 + 0` / `8 - 0`).
    const HONEST_VALIDATOR_COUNT: usize = 16;
    const ADVERSARIAL_VALIDATOR_COUNT: usize = 8;
    const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT;
    let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
    let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
    let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
    let rig = BeaconChainHarness::new(MinimalEthSpec, None, validators_keypairs);
    let slots_per_epoch = rig.slots_per_epoch();
    let (mut state, state_root) = rig.get_current_state_and_root();

    // Honest chain up to and including the first slot of epoch 1.
    let canonical_chain_slots: Vec<Slot> = (1..=rig.epoch_start_slot(1)).map(Slot::new).collect();
    let (canonical_chain_blocks_pre_finalization, _, _, new_state) = rig
        .add_attested_blocks_at_slots(
            state,
            state_root,
            &canonical_chain_slots,
            &honest_validators,
        );
    state = new_state;
    let canonical_chain_slot: u64 = rig.get_current_slot().into();

    // Adversarial fork occupying the remainder of epoch 1.
    let stray_slots: Vec<Slot> = (canonical_chain_slot + 1..rig.epoch_start_slot(2))
        .map(Slot::new)
        .collect();
    let (current_state, current_state_root) = rig.get_current_state_and_root();
    let (stray_blocks, stray_states, stray_head, _) = rig.add_attested_blocks_at_slots(
        current_state,
        current_state_root,
        &stray_slots,
        &adversarial_validators,
    );

    // Precondition: Ensure all stray_blocks blocks are still known
    for &block_hash in stray_blocks.values() {
        assert!(
            rig.block_exists(block_hash),
            "stray block {} should be still present",
            block_hash
        );
    }

    for (&slot, &state_hash) in &stray_states {
        assert!(
            rig.hot_state_exists(state_hash),
            "stray state {} at slot {} should be still present",
            state_hash,
            slot
        );
    }

    // Precondition: nothing finalized yet, and the fork head is a known head.
    assert_eq!(rig.get_finalized_checkpoints(), hashset! {});

    assert!(rig.chain.knows_head(&stray_head));

    // Trigger finalization by extending the honest chain several epochs.
    let finalization_slots: Vec<Slot> = ((canonical_chain_slot + 1)
        ..=(canonical_chain_slot + slots_per_epoch * 5))
        .map(Slot::new)
        .collect();
    let state_root = state.update_tree_hash_cache().unwrap();
    let (canonical_chain_blocks_post_finalization, _, _, _) = rig.add_attested_blocks_at_slots(
        state,
        state_root,
        &finalization_slots,
        &honest_validators,
    );

    // Postcondition: New blocks got finalized
    assert_eq!(
        rig.get_finalized_checkpoints(),
        hashset! {
            canonical_chain_blocks_pre_finalization[&rig.epoch_start_slot(1).into()],
            canonical_chain_blocks_post_finalization[&rig.epoch_start_slot(2).into()],
        },
    );

    // Postcondition: Ensure all stray_blocks blocks have been pruned
    for &block_hash in stray_blocks.values() {
        assert!(
            !rig.block_exists(block_hash),
            "abandoned block {} should have been pruned",
            block_hash
        );
    }

    // Stray states must be absent from both the hot and cold databases.
    for (&slot, &state_hash) in &stray_states {
        assert!(
            !rig.hot_state_exists(state_hash),
            "stray state {} at slot {} should have been pruned",
            state_hash,
            slot
        );
        assert!(
            !rig.cold_state_exists(state_hash),
            "stray state {} at slot {} should have been pruned",
            state_hash,
            slot
        );
    }

    assert!(!rig.chain.knows_head(&stray_head));
}
|
|
|
|
|
|
|
|
// A block shared by both the canonical chain and an abandoned fork must survive pruning:
// only the fork-exclusive suffix (here, a single block) is eligible for deletion.
#[test]
fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() {
    const HONEST_VALIDATOR_COUNT: usize = 16 + 0;
    const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0;
    const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT;
    let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
    let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
    let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
    let rig = BeaconChainHarness::new(MinimalEthSpec, None, validators_keypairs);
    let slots_per_epoch = rig.slots_per_epoch();
    let (state, state_root) = rig.get_current_state_and_root();

    // Fill up 0th epoch
    let canonical_chain_slots_zeroth_epoch: Vec<Slot> =
        (1..rig.epoch_start_slot(1)).map(Slot::new).collect();
    let (_, _, _, mut state) = rig.add_attested_blocks_at_slots(
        state,
        state_root,
        &canonical_chain_slots_zeroth_epoch,
        &honest_validators,
    );

    // Fill up 1st epoch
    // `shared_head` (the head after these two blocks) is an ancestor of both the canonical
    // chain and the stray fork created below.
    let canonical_chain_slots_first_epoch: Vec<Slot> = (rig.epoch_start_slot(1)
        ..=rig.epoch_start_slot(1) + 1)
        .map(Slot::new)
        .collect();
    let state_root = state.update_tree_hash_cache().unwrap();
    let (canonical_chain_blocks_first_epoch, _, shared_head, mut state) = rig
        .add_attested_blocks_at_slots(
            state.clone(),
            state_root,
            &canonical_chain_slots_first_epoch,
            &honest_validators,
        );
    let canonical_chain_slot: u64 = rig.get_current_slot().into();

    // A one-block adversarial fork branching off `shared_head`.
    let stray_chain_slots_first_epoch: Vec<Slot> = (rig.epoch_start_slot(1) + 2
        ..=rig.epoch_start_slot(1) + 2)
        .map(Slot::new)
        .collect();
    let state_root = state.update_tree_hash_cache().unwrap();
    let (stray_blocks, stray_states, stray_head, _) = rig.add_attested_blocks_at_slots(
        state.clone(),
        state_root,
        &stray_chain_slots_first_epoch,
        &adversarial_validators,
    );

    // Preconditions
    for &block_hash in stray_blocks.values() {
        assert!(
            rig.block_exists(block_hash),
            "stray block {} should be still present",
            block_hash
        );
    }

    for (&slot, &state_hash) in &stray_states {
        assert!(
            rig.hot_state_exists(state_hash),
            "stray state {} at slot {} should be still present",
            state_hash,
            slot
        );
    }

    // Only the genesis boundary block is finalized so far.
    let chain_dump = rig.chain.chain_dump().unwrap();
    assert_eq!(
        get_finalized_epoch_boundary_blocks(&chain_dump),
        vec![Hash256::zero().into()].into_iter().collect(),
    );

    assert!(get_blocks(&chain_dump).contains(&shared_head));

    // Trigger finalization
    let finalization_slots: Vec<Slot> = ((canonical_chain_slot + 1)
        ..=(canonical_chain_slot + slots_per_epoch * 5))
        .map(Slot::new)
        .collect();
    let state_root = state.update_tree_hash_cache().unwrap();
    let (canonical_chain_blocks, _, _, _) = rig.add_attested_blocks_at_slots(
        state,
        state_root,
        &finalization_slots,
        &honest_validators,
    );

    // Postconditions
    assert_eq!(
        rig.get_finalized_checkpoints(),
        hashset! {
            canonical_chain_blocks_first_epoch[&rig.epoch_start_slot(1).into()],
            canonical_chain_blocks[&rig.epoch_start_slot(2).into()],
        },
    );

    // The fork-exclusive block/state must have been pruned...
    for &block_hash in stray_blocks.values() {
        assert!(
            !rig.block_exists(block_hash),
            "stray block {} should have been pruned",
            block_hash,
        );
    }

    for (&slot, &state_hash) in &stray_states {
        assert!(
            !rig.hot_state_exists(state_hash),
            "stray state {} at slot {} should have been pruned",
            state_hash,
            slot
        );
        assert!(
            !rig.cold_state_exists(state_hash),
            "stray state {} at slot {} should have been pruned",
            state_hash,
            slot
        );
    }

    // ...but the block shared with the canonical chain must survive.
    assert!(!rig.chain.knows_head(&stray_head));
    let chain_dump = rig.chain.chain_dump().unwrap();
    assert!(get_blocks(&chain_dump).contains(&shared_head));
}
|
|
|
|
|
|
|
|
// A fork that is older than the finalized checkpoint window must NOT be pruned:
// its blocks and hot states remain, and its head is still tracked.
#[test]
fn pruning_does_not_touch_blocks_prior_to_finalization() {
    const HONEST_VALIDATOR_COUNT: usize = 16;
    const ADVERSARIAL_VALIDATOR_COUNT: usize = 8;
    const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT;
    let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
    let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
    let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
    let rig = BeaconChainHarness::new(MinimalEthSpec, None, validators_keypairs);
    let slots_per_epoch = rig.slots_per_epoch();
    let (mut state, state_root) = rig.get_current_state_and_root();

    // Fill up 0th epoch with canonical chain blocks
    let zeroth_epoch_slots: Vec<Slot> = (1..=rig.epoch_start_slot(1)).map(Slot::new).collect();
    let (canonical_chain_blocks, _, _, new_state) = rig.add_attested_blocks_at_slots(
        state,
        state_root,
        &zeroth_epoch_slots,
        &honest_validators,
    );
    state = new_state;
    let canonical_chain_slot: u64 = rig.get_current_slot().into();

    // Fill up 1st epoch. Contains a fork.
    // The adversarial fork branches off the end of epoch 0 (`state` is cloned so the
    // honest chain can later continue from the same point).
    let first_epoch_slots: Vec<Slot> = ((rig.epoch_start_slot(1) + 1)..(rig.epoch_start_slot(2)))
        .map(Slot::new)
        .collect();
    let state_root = state.update_tree_hash_cache().unwrap();
    let (stray_blocks, stray_states, stray_head, _) = rig.add_attested_blocks_at_slots(
        state.clone(),
        state_root,
        &first_epoch_slots,
        &adversarial_validators,
    );

    // Preconditions
    for &block_hash in stray_blocks.values() {
        assert!(
            rig.block_exists(block_hash),
            "stray block {} should be still present",
            block_hash
        );
    }

    for (&slot, &state_hash) in &stray_states {
        assert!(
            rig.hot_state_exists(state_hash),
            "stray state {} at slot {} should be still present",
            state_hash,
            slot
        );
    }

    assert_eq!(rig.get_finalized_checkpoints(), hashset! {});

    // Trigger finalization
    let slots: Vec<Slot> = ((canonical_chain_slot + 1)
        ..=(canonical_chain_slot + slots_per_epoch * 4))
        .map(Slot::new)
        .collect();
    let state_root = state.update_tree_hash_cache().unwrap();
    let (_, _, _, _) =
        rig.add_attested_blocks_at_slots(state, state_root, &slots, &honest_validators);

    // Postconditions
    // Only the epoch-1 boundary block is finalized.
    assert_eq!(
        rig.get_finalized_checkpoints(),
        hashset! {canonical_chain_blocks[&rig.epoch_start_slot(1).into()]},
    );

    // The fork pre-dates finalization, so nothing of it may be deleted.
    for &block_hash in stray_blocks.values() {
        assert!(
            rig.block_exists(block_hash),
            "stray block {} should be still present",
            block_hash
        );
    }

    for (&slot, &state_hash) in &stray_states {
        assert!(
            rig.hot_state_exists(state_hash),
            "stray state {} at slot {} should be still present",
            state_hash,
            slot
        );
    }

    assert!(rig.chain.knows_head(&stray_head));
}
|
|
|
|
|
|
|
|
#[test]
|
2020-08-26 09:24:55 +00:00
|
|
|
fn prunes_fork_growing_past_youngest_finalized_checkpoint() {
|
|
|
|
const HONEST_VALIDATOR_COUNT: usize = 16 + 0;
|
|
|
|
const ADVERSARIAL_VALIDATOR_COUNT: usize = 8 - 0;
|
|
|
|
const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT;
|
|
|
|
let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
|
2020-04-20 09:59:56 +00:00
|
|
|
let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
|
2020-08-26 09:24:55 +00:00
|
|
|
let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
|
2021-07-09 06:15:32 +00:00
|
|
|
let rig = BeaconChainHarness::new(MinimalEthSpec, None, validators_keypairs);
|
2021-03-17 05:09:57 +00:00
|
|
|
let (state, state_root) = rig.get_current_state_and_root();
|
2020-04-20 09:59:56 +00:00
|
|
|
|
|
|
|
// Fill up 0th epoch with canonical chain blocks
|
2020-08-26 09:24:55 +00:00
|
|
|
let zeroth_epoch_slots: Vec<Slot> = (1..=rig.epoch_start_slot(1)).map(Slot::new).collect();
|
2021-03-17 05:09:57 +00:00
|
|
|
let (canonical_blocks_zeroth_epoch, _, _, mut state) = rig.add_attested_blocks_at_slots(
|
|
|
|
state,
|
|
|
|
state_root,
|
|
|
|
&zeroth_epoch_slots,
|
|
|
|
&honest_validators,
|
|
|
|
);
|
2020-04-20 09:59:56 +00:00
|
|
|
|
|
|
|
// Fill up 1st epoch. Contains a fork.
|
2020-08-26 09:24:55 +00:00
|
|
|
let slots_first_epoch: Vec<Slot> = (rig.epoch_start_slot(1) + 1..rig.epoch_start_slot(2))
|
|
|
|
.map(Into::into)
|
|
|
|
.collect();
|
2021-03-17 05:09:57 +00:00
|
|
|
let state_root = state.update_tree_hash_cache().unwrap();
|
|
|
|
let (stray_blocks_first_epoch, stray_states_first_epoch, _, mut stray_state) = rig
|
|
|
|
.add_attested_blocks_at_slots(
|
|
|
|
state.clone(),
|
|
|
|
state_root,
|
|
|
|
&slots_first_epoch,
|
|
|
|
&adversarial_validators,
|
|
|
|
);
|
|
|
|
let (canonical_blocks_first_epoch, _, _, mut canonical_state) =
|
|
|
|
rig.add_attested_blocks_at_slots(state, state_root, &slots_first_epoch, &honest_validators);
|
2020-04-20 09:59:56 +00:00
|
|
|
|
|
|
|
// Fill up 2nd epoch. Extends both the canonical chain and the fork.
|
2020-08-26 09:24:55 +00:00
|
|
|
let stray_slots_second_epoch: Vec<Slot> = (rig.epoch_start_slot(2)
|
|
|
|
..=rig.epoch_start_slot(2) + 1)
|
|
|
|
.map(Into::into)
|
|
|
|
.collect();
|
2021-03-17 05:09:57 +00:00
|
|
|
let stray_state_root = stray_state.update_tree_hash_cache().unwrap();
|
2020-08-26 09:24:55 +00:00
|
|
|
let (stray_blocks_second_epoch, stray_states_second_epoch, stray_head, _) = rig
|
|
|
|
.add_attested_blocks_at_slots(
|
2020-04-20 09:59:56 +00:00
|
|
|
stray_state,
|
2021-03-17 05:09:57 +00:00
|
|
|
stray_state_root,
|
2020-08-26 09:24:55 +00:00
|
|
|
&stray_slots_second_epoch,
|
|
|
|
&adversarial_validators,
|
2020-04-20 09:59:56 +00:00
|
|
|
);
|
|
|
|
|
|
|
|
// Precondition: Ensure all stray_blocks blocks are still known
|
|
|
|
let stray_blocks: HashMap<Slot, SignedBeaconBlockHash> = stray_blocks_first_epoch
|
|
|
|
.into_iter()
|
|
|
|
.chain(stray_blocks_second_epoch.into_iter())
|
|
|
|
.collect();
|
|
|
|
|
|
|
|
let stray_states: HashMap<Slot, BeaconStateHash> = stray_states_first_epoch
|
|
|
|
.into_iter()
|
|
|
|
.chain(stray_states_second_epoch.into_iter())
|
|
|
|
.collect();
|
|
|
|
|
|
|
|
for &block_hash in stray_blocks.values() {
|
|
|
|
assert!(
|
2020-08-26 09:24:55 +00:00
|
|
|
rig.block_exists(block_hash),
|
2020-04-20 09:59:56 +00:00
|
|
|
"stray block {} should be still present",
|
|
|
|
block_hash
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
for (&slot, &state_hash) in &stray_states {
|
|
|
|
assert!(
|
2020-08-26 09:24:55 +00:00
|
|
|
rig.hot_state_exists(state_hash),
|
2020-04-20 09:59:56 +00:00
|
|
|
"stray state {} at slot {} should be still present",
|
|
|
|
state_hash,
|
|
|
|
slot
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
2020-08-26 09:24:55 +00:00
|
|
|
// Precondition: Nothing is finalized yet
|
|
|
|
assert_eq!(rig.get_finalized_checkpoints(), hashset! {},);
|
2020-04-20 09:59:56 +00:00
|
|
|
|
2020-08-26 09:24:55 +00:00
|
|
|
assert!(rig.chain.knows_head(&stray_head));
|
2020-04-20 09:59:56 +00:00
|
|
|
|
|
|
|
// Trigger finalization
|
2020-08-26 09:24:55 +00:00
|
|
|
let canonical_slots: Vec<Slot> = (rig.epoch_start_slot(2)..=rig.epoch_start_slot(6))
|
|
|
|
.map(Into::into)
|
|
|
|
.collect();
|
2021-03-17 05:09:57 +00:00
|
|
|
let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap();
|
|
|
|
let (canonical_blocks, _, _, _) = rig.add_attested_blocks_at_slots(
|
|
|
|
canonical_state,
|
|
|
|
canonical_state_root,
|
|
|
|
&canonical_slots,
|
|
|
|
&honest_validators,
|
|
|
|
);
|
2020-04-20 09:59:56 +00:00
|
|
|
|
|
|
|
// Postconditions
|
|
|
|
let canonical_blocks: HashMap<Slot, SignedBeaconBlockHash> = canonical_blocks_zeroth_epoch
|
|
|
|
.into_iter()
|
|
|
|
.chain(canonical_blocks_first_epoch.into_iter())
|
2020-08-26 09:24:55 +00:00
|
|
|
.chain(canonical_blocks.into_iter())
|
2020-04-20 09:59:56 +00:00
|
|
|
.collect();
|
|
|
|
|
|
|
|
// Postcondition: New blocks got finalized
|
|
|
|
assert_eq!(
|
2020-08-26 09:24:55 +00:00
|
|
|
rig.get_finalized_checkpoints(),
|
|
|
|
hashset! {
|
|
|
|
canonical_blocks[&rig.epoch_start_slot(1).into()],
|
|
|
|
canonical_blocks[&rig.epoch_start_slot(2).into()],
|
|
|
|
},
|
2020-04-20 09:59:56 +00:00
|
|
|
);
|
|
|
|
|
|
|
|
// Postcondition: Ensure all stray_blocks blocks have been pruned
|
|
|
|
for &block_hash in stray_blocks.values() {
|
|
|
|
assert!(
|
2020-08-26 09:24:55 +00:00
|
|
|
!rig.block_exists(block_hash),
|
2020-04-20 09:59:56 +00:00
|
|
|
"abandoned block {} should have been pruned",
|
|
|
|
block_hash
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
for (&slot, &state_hash) in &stray_states {
|
|
|
|
assert!(
|
2020-08-26 09:24:55 +00:00
|
|
|
!rig.hot_state_exists(state_hash),
|
|
|
|
"stray state {} at slot {} should have been pruned",
|
|
|
|
state_hash,
|
|
|
|
slot
|
|
|
|
);
|
|
|
|
assert!(
|
|
|
|
!rig.cold_state_exists(state_hash),
|
|
|
|
"stray state {} at slot {} should have been pruned",
|
2020-04-20 09:59:56 +00:00
|
|
|
state_hash,
|
|
|
|
slot
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
2020-08-26 09:24:55 +00:00
|
|
|
assert!(!rig.chain.knows_head(&stray_head));
|
2020-04-20 09:59:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// This is to check that states outside of normal block processing (i.e. states created for
// skipped slots) are pruned correctly once an abandoned fork is finalized away.
#[test]
fn prunes_skipped_slots_states() {
    // Honest supermajority (16 of 24) so the canonical chain can finalize; the adversarial
    // minority builds the stray fork. (Previously written as `16 + 0` / `8 - 0`.)
    const HONEST_VALIDATOR_COUNT: usize = 16;
    const ADVERSARIAL_VALIDATOR_COUNT: usize = 8;
    const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT;
    let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
    let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
    let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
    let rig = BeaconChainHarness::new(MinimalEthSpec, None, validators_keypairs);
    let (state, state_root) = rig.get_current_state_and_root();

    // Fill the zeroth epoch (slots 1..=epoch_start_slot(1)) with honest canonical blocks.
    let canonical_slots_zeroth_epoch: Vec<Slot> =
        (1..=rig.epoch_start_slot(1)).map(Into::into).collect();
    let (canonical_blocks_zeroth_epoch, _, _, mut canonical_state) = rig
        .add_attested_blocks_at_slots(
            state.clone(),
            state_root,
            &canonical_slots_zeroth_epoch,
            &honest_validators,
        );

    // The fork deliberately leaves this slot empty, forcing a skipped-slot state.
    let skipped_slot: Slot = (rig.epoch_start_slot(1) + 1).into();

    // Adversarial fork: starts after the skipped slot and runs to the end of epoch 1.
    let stray_slots: Vec<Slot> = ((skipped_slot + 1).into()..rig.epoch_start_slot(2))
        .map(Into::into)
        .collect();
    let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap();
    let (stray_blocks, stray_states, _, stray_state) = rig.add_attested_blocks_at_slots(
        canonical_state.clone(),
        canonical_state_root,
        &stray_slots,
        &adversarial_validators,
    );

    // Preconditions: all fork blocks and states are still in the hot DB.
    for &block_hash in stray_blocks.values() {
        assert!(
            rig.block_exists(block_hash),
            "stray block {} should be still present",
            block_hash
        );
    }

    for (&slot, &state_hash) in &stray_states {
        assert!(
            rig.hot_state_exists(state_hash),
            "stray state {} at slot {} should be still present",
            state_hash,
            slot
        );
    }

    // Precondition: nothing is finalized yet.
    assert_eq!(rig.get_finalized_checkpoints(), hashset! {});

    // Make sure slots were skipped, and the skipped-slot state exists pre-finalization.
    assert!(rig.is_skipped_slot(&stray_state, skipped_slot));
    {
        let state_hash = (*stray_state.get_state_root(skipped_slot).unwrap()).into();
        assert!(
            rig.hot_state_exists(state_hash),
            "skipped slot state {} should be still present",
            state_hash
        );
    }

    // Trigger finalization by extending the canonical chain well past the fork.
    let canonical_slots: Vec<Slot> = ((skipped_slot + 1).into()..rig.epoch_start_slot(7))
        .map(Into::into)
        .collect();
    let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap();
    let (canonical_blocks_post_finalization, _, _, _) = rig.add_attested_blocks_at_slots(
        canonical_state,
        canonical_state_root,
        &canonical_slots,
        &honest_validators,
    );

    // Postconditions
    let canonical_blocks: HashMap<Slot, SignedBeaconBlockHash> = canonical_blocks_zeroth_epoch
        .into_iter()
        .chain(canonical_blocks_post_finalization.into_iter())
        .collect();
    assert_eq!(
        rig.get_finalized_checkpoints(),
        hashset! {
            canonical_blocks[&rig.epoch_start_slot(1).into()],
            canonical_blocks[&rig.epoch_start_slot(2).into()],
        },
    );

    // All fork states must be gone from both the hot and cold databases.
    for (&slot, &state_hash) in &stray_states {
        assert!(
            !rig.hot_state_exists(state_hash),
            "stray state {} at slot {} should have been pruned",
            state_hash,
            slot
        );
        assert!(
            !rig.cold_state_exists(state_hash),
            "stray state {} at slot {} should have been pruned",
            state_hash,
            slot
        );
    }

    // The skipped-slot state in particular must have been pruned.
    assert!(rig.is_skipped_slot(&stray_state, skipped_slot));
    {
        let state_hash: BeaconStateHash =
            (*stray_state.get_state_root(skipped_slot).unwrap()).into();
        assert!(
            !rig.hot_state_exists(state_hash),
            "skipped slot {} state {} should have been pruned",
            skipped_slot,
            state_hash
        );
    }
}
|
|
|
|
|
|
|
|
// Check that finalization works correctly when the first slot of an epoch is skipped: the
// finalized checkpoint then refers to the last block *before* the epoch start. Also verifies
// that stray fork states (including the skipped-slot state) are pruned afterwards.
// (The previous comment was copy-pasted from `prunes_skipped_slots_states`.)
#[test]
fn finalizes_non_epoch_start_slot() {
    // Honest supermajority (16 of 24) so the canonical chain can finalize; the adversarial
    // minority builds the stray fork. (Previously written as `16 + 0` / `8 - 0`.)
    const HONEST_VALIDATOR_COUNT: usize = 16;
    const ADVERSARIAL_VALIDATOR_COUNT: usize = 8;
    const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT;
    let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
    let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
    let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
    let rig = BeaconChainHarness::new(MinimalEthSpec, None, validators_keypairs);
    let (state, state_root) = rig.get_current_state_and_root();

    // Fill the zeroth epoch, stopping *before* epoch_start_slot(1) — that slot gets skipped.
    let canonical_slots_zeroth_epoch: Vec<Slot> =
        (1..rig.epoch_start_slot(1)).map(Into::into).collect();
    let (canonical_blocks_zeroth_epoch, _, _, mut canonical_state) = rig
        .add_attested_blocks_at_slots(
            state.clone(),
            state_root,
            &canonical_slots_zeroth_epoch,
            &honest_validators,
        );

    // The skipped slot is the epoch boundary itself.
    let skipped_slot: Slot = rig.epoch_start_slot(1).into();

    // Adversarial fork over the remainder of epoch 1.
    let stray_slots: Vec<Slot> = ((skipped_slot + 1).into()..rig.epoch_start_slot(2))
        .map(Into::into)
        .collect();
    let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap();
    let (stray_blocks, stray_states, _, stray_state) = rig.add_attested_blocks_at_slots(
        canonical_state.clone(),
        canonical_state_root,
        &stray_slots,
        &adversarial_validators,
    );

    // Preconditions: fork blocks and states still present, nothing finalized yet.
    for &block_hash in stray_blocks.values() {
        assert!(
            rig.block_exists(block_hash),
            "stray block {} should be still present",
            block_hash
        );
    }

    for (&slot, &state_hash) in &stray_states {
        assert!(
            rig.hot_state_exists(state_hash),
            "stray state {} at slot {} should be still present",
            state_hash,
            slot
        );
    }

    assert_eq!(rig.get_finalized_checkpoints(), hashset! {});

    // Make sure slots were skipped, and the skipped-slot state exists pre-finalization.
    assert!(rig.is_skipped_slot(&stray_state, skipped_slot));
    {
        let state_hash = (*stray_state.get_state_root(skipped_slot).unwrap()).into();
        assert!(
            rig.hot_state_exists(state_hash),
            "skipped slot state {} should be still present",
            state_hash
        );
    }

    // Trigger finalization by extending the canonical chain well past the fork.
    let canonical_slots: Vec<Slot> = ((skipped_slot + 1).into()..rig.epoch_start_slot(7))
        .map(Into::into)
        .collect();
    let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap();
    let (canonical_blocks_post_finalization, _, _, _) = rig.add_attested_blocks_at_slots(
        canonical_state,
        canonical_state_root,
        &canonical_slots,
        &honest_validators,
    );

    // Postconditions
    let canonical_blocks: HashMap<Slot, SignedBeaconBlockHash> = canonical_blocks_zeroth_epoch
        .into_iter()
        .chain(canonical_blocks_post_finalization.into_iter())
        .collect();
    // Because the epoch-1 boundary slot was skipped, the finalized checkpoint for epoch 1
    // points at the last block before the boundary (epoch_start_slot(1) - 1).
    assert_eq!(
        rig.get_finalized_checkpoints(),
        hashset! {
            canonical_blocks[&(rig.epoch_start_slot(1) - 1).into()],
            canonical_blocks[&rig.epoch_start_slot(2).into()],
        },
    );

    // All fork states must be gone from both the hot and cold databases.
    for (&slot, &state_hash) in &stray_states {
        assert!(
            !rig.hot_state_exists(state_hash),
            "stray state {} at slot {} should have been pruned",
            state_hash,
            slot
        );
        assert!(
            !rig.cold_state_exists(state_hash),
            "stray state {} at slot {} should have been pruned",
            state_hash,
            slot
        );
    }

    // The skipped-slot state in particular must have been pruned.
    assert!(rig.is_skipped_slot(&stray_state, skipped_slot));
    {
        let state_hash: BeaconStateHash =
            (*stray_state.get_state_root(skipped_slot).unwrap()).into();
        assert!(
            !rig.hot_state_exists(state_hash),
            "skipped slot {} state {} should have been pruned",
            skipped_slot,
            state_hash
        );
    }
}
|
|
|
|
|
2020-08-26 00:01:06 +00:00
|
|
|
fn check_all_blocks_exist<'a>(
|
|
|
|
harness: &TestHarness,
|
|
|
|
blocks: impl Iterator<Item = &'a SignedBeaconBlockHash>,
|
|
|
|
) {
|
|
|
|
for &block_hash in blocks {
|
|
|
|
let block = harness.chain.get_block(&block_hash.into()).unwrap();
|
|
|
|
assert!(
|
|
|
|
block.is_some(),
|
|
|
|
"expected block {:?} to be in DB",
|
|
|
|
block_hash
|
|
|
|
);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn check_all_states_exist<'a>(
|
|
|
|
harness: &TestHarness,
|
|
|
|
states: impl Iterator<Item = &'a BeaconStateHash>,
|
|
|
|
) {
|
|
|
|
for &state_hash in states {
|
|
|
|
let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
|
|
|
|
assert!(
|
|
|
|
state.is_some(),
|
|
|
|
"expected state {:?} to be in DB",
|
|
|
|
state_hash,
|
|
|
|
);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check that none of the given states exist in the database.
|
|
|
|
fn check_no_states_exist<'a>(
|
|
|
|
harness: &TestHarness,
|
|
|
|
states: impl Iterator<Item = &'a BeaconStateHash>,
|
|
|
|
) {
|
|
|
|
for &state_root in states {
|
|
|
|
assert!(
|
|
|
|
harness
|
|
|
|
.chain
|
|
|
|
.get_state(&state_root.into(), None)
|
|
|
|
.unwrap()
|
|
|
|
.is_none(),
|
|
|
|
"state {:?} should not be in the DB",
|
|
|
|
state_root
|
|
|
|
);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check that none of the given blocks exist in the database.
|
|
|
|
fn check_no_blocks_exist<'a>(
|
|
|
|
harness: &TestHarness,
|
|
|
|
blocks: impl Iterator<Item = &'a SignedBeaconBlockHash>,
|
|
|
|
) {
|
|
|
|
for &block_hash in blocks {
|
|
|
|
let block = harness.chain.get_block(&block_hash.into()).unwrap();
|
|
|
|
assert!(
|
|
|
|
block.is_none(),
|
|
|
|
"did not expect block {:?} to be in the DB",
|
|
|
|
block_hash
|
|
|
|
);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Prune a single-block fork that diverges after three epochs of canonical blocks.
#[test]
fn prune_single_block_fork() {
    let slots_per_epoch = E::slots_per_epoch();
    // 3 epochs of initial blocks, 1 canonical skip, 1 epoch of middle blocks,
    // 0 fork skips, 1 fork block.
    let num_initial_blocks = 3 * slots_per_epoch;
    let num_canonical_middle_blocks = slots_per_epoch;
    pruning_test(num_initial_blocks, 1, num_canonical_middle_blocks, 0, 1);
}
|
|
|
|
|
|
|
|
// Prune a single-block fork that sits after a long (two-epoch) run of skip slots.
#[test]
fn prune_single_block_long_skip() {
    let slots_per_epoch = E::slots_per_epoch();
    pruning_test(
        2 * slots_per_epoch,
        1,
        2 * slots_per_epoch,
        // `slots_per_epoch` is already `u64`, so the old `as u64` cast here was a no-op
        // (clippy: `unnecessary_cast`).
        2 * slots_per_epoch,
        1,
    );
}
|
|
|
|
|
|
|
|
// Prune a fork whose shared skip slots fall in the middle of an epoch.
#[test]
fn prune_shared_skip_states_mid_epoch() {
    let slots_per_epoch = E::slots_per_epoch();
    // One and a half epochs of initial blocks; the fork skips 2 slots and then builds
    // almost a full epoch of blocks.
    let num_initial_blocks = slots_per_epoch + slots_per_epoch / 2;
    let num_fork_blocks = slots_per_epoch - 1;
    pruning_test(num_initial_blocks, 1, slots_per_epoch, 2, num_fork_blocks);
}
|
|
|
|
|
|
|
|
// Exercise pruning when the shared skip slots straddle epoch boundaries in various ways.
// Arguments to `pruning_test` are: (num_initial_blocks, num_canonical_skips,
// num_canonical_middle_blocks, num_fork_skips, num_fork_blocks).
#[test]
fn prune_shared_skip_states_epoch_boundaries() {
    let slots_per_epoch = E::slots_per_epoch();
    // Divergence one slot before the epoch boundary; skips of 1 and 2 slots cross it.
    pruning_test(slots_per_epoch - 1, 1, slots_per_epoch, 2, slots_per_epoch);
    pruning_test(slots_per_epoch - 1, 2, slots_per_epoch, 1, slots_per_epoch);
    // Divergence mid-epoch with half-epoch skips reaching the next boundary.
    pruning_test(
        2 * slots_per_epoch + slots_per_epoch / 2,
        slots_per_epoch as u64 / 2,
        slots_per_epoch,
        slots_per_epoch as u64 / 2 + 1,
        slots_per_epoch,
    );
    // NOTE(review): this call is byte-identical to the one above — presumably a copy-paste
    // duplicate rather than a distinct scenario. TODO confirm and either vary the
    // parameters or drop it.
    pruning_test(
        2 * slots_per_epoch + slots_per_epoch / 2,
        slots_per_epoch as u64 / 2,
        slots_per_epoch,
        slots_per_epoch as u64 / 2 + 1,
        slots_per_epoch,
    );
    // A full epoch of canonical skips starting just before the boundary; the fork has no
    // skips and spans two epochs.
    pruning_test(
        2 * slots_per_epoch - 1,
        slots_per_epoch as u64,
        1,
        0,
        2 * slots_per_epoch,
    );
}
|
|
|
|
|
|
|
|
/// Generic harness for pruning tests.
///
/// Builds a canonical chain and a fork that diverge after `num_initial_blocks`, triggers
/// finalization on the canonical side, and then asserts that every fork block and every
/// non-canonical state has been pruned from the database.
fn pruning_test(
    // Number of blocks to start the chain with before forking.
    num_initial_blocks: u64,
    // Number of skip slots on the main chain after the initial blocks.
    num_canonical_skips: u64,
    // Number of blocks on the main chain after the skip, but before the finalisation-triggering
    // blocks.
    num_canonical_middle_blocks: u64,
    // Number of skip slots on the fork chain after the initial blocks.
    num_fork_skips: u64,
    // Number of blocks on the fork chain after the skips.
    num_fork_blocks: u64,
) {
    const VALIDATOR_COUNT: usize = 24;
    // Two-thirds of validators attest honestly so the canonical chain can finalize.
    const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2;
    const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY;

    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(store.clone(), VALIDATOR_COUNT);
    let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
    let faulty_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();

    // Helper: `num_blocks` consecutive slots starting at `start`.
    let slots = |start: Slot, num_blocks: u64| -> Vec<Slot> {
        (start.as_u64()..start.as_u64() + num_blocks)
            .map(Slot::new)
            .collect()
    };

    // Build the shared prefix of the chain, ending at the divergence point.
    let start_slot = Slot::new(1);
    let divergence_slot = start_slot + num_initial_blocks;
    let (state, state_root) = harness.get_current_state_and_root();
    let (_, _, _, divergence_state) = harness.add_attested_blocks_at_slots(
        state,
        state_root,
        &slots(start_slot, num_initial_blocks)[..],
        &honest_validators,
    );

    // From the divergence state, build the canonical chain and the fork concurrently.
    let mut chains = harness.add_blocks_on_multiple_chains(vec![
        // Canonical chain
        (
            divergence_state.clone(),
            slots(
                divergence_slot + num_canonical_skips,
                num_canonical_middle_blocks,
            ),
            honest_validators.clone(),
        ),
        // Fork chain
        (
            divergence_state.clone(),
            slots(divergence_slot + num_fork_skips, num_fork_blocks),
            faulty_validators,
        ),
    ]);
    let (_, _, _, mut canonical_state) = chains.remove(0);
    let (stray_blocks, stray_states, _, stray_head_state) = chains.remove(0);

    // Collect every state root on the fork by walking backwards from its head; this also
    // captures skipped-slot states that `add_blocks_on_multiple_chains` did not report.
    let stray_head_slot = divergence_slot + num_fork_skips + num_fork_blocks - 1;
    let stray_head_state_root = stray_states[&stray_head_slot];
    let stray_states = harness
        .chain
        .rev_iter_state_roots_from(stray_head_state_root.into(), &stray_head_state)
        .map(Result::unwrap)
        .map(|(state_root, _)| state_root.into())
        .collect::<HashSet<_>>();

    // Preconditions: the fork is fully present before finalization.
    check_all_blocks_exist(&harness, stray_blocks.values());
    check_all_states_exist(&harness, stray_states.iter());

    // Precondition: only the genesis block (zero hash) is at a finalized epoch boundary.
    let chain_dump = harness.chain.chain_dump().unwrap();
    assert_eq!(
        get_finalized_epoch_boundary_blocks(&chain_dump),
        vec![Hash256::zero().into()].into_iter().collect(),
    );

    // Trigger finalization
    let num_finalization_blocks = 4 * E::slots_per_epoch();
    let canonical_slot = divergence_slot + num_canonical_skips + num_canonical_middle_blocks;
    let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap();
    harness.add_attested_blocks_at_slots(
        canonical_state,
        canonical_state_root,
        &slots(canonical_slot, num_finalization_blocks),
        &honest_validators,
    );

    // Check that finalization has advanced past the divergence slot.
    assert!(
        harness
            .chain
            .head_info()
            .unwrap()
            .finalized_checkpoint
            .epoch
            .start_slot(E::slots_per_epoch())
            > divergence_slot
    );
    // +1 accounts for the genesis block in the chain dump.
    check_chain_dump(
        &harness,
        (num_initial_blocks + num_canonical_middle_blocks + num_finalization_blocks + 1) as u64,
    );

    // Gather every state root on the canonical chain, from genesis forwards.
    let all_canonical_states = harness
        .chain
        .forwards_iter_state_roots(Slot::new(0))
        .unwrap()
        .map(Result::unwrap)
        .map(|(state_root, _)| state_root.into())
        .collect::<HashSet<BeaconStateHash>>();

    // Postconditions: canonical states survive; everything fork-only is gone.
    check_all_states_exist(&harness, all_canonical_states.iter());
    check_no_states_exist(&harness, stray_states.difference(&all_canonical_states));
    check_no_blocks_exist(&harness, stray_blocks.values());
}
|
|
|
|
|
Implement database temp states to reduce memory usage (#1798)
## Issue Addressed
Closes #800
Closes #1713
## Proposed Changes
Implement the temporary state storage algorithm described in #800. Specifically:
* Add `DBColumn::BeaconStateTemporary`, for storing 0-length temporary marker values.
* Store intermediate states immediately as they are created, marked temporary. Delete the temporary flag if the block is processed successfully.
* Add a garbage collection process to delete leftover temporary states on start-up.
* Bump the database schema version to 2 so that a DB with temporary states can't accidentally be used with older versions of the software. The auto-migration is a no-op, but puts in place some infra that we can use for future migrations (e.g. #1784)
## Additional Info
There are two known race conditions, one potentially causing permanent faults (hopefully rare), and the other insignificant.
### Race 1: Permanent state marked temporary
EDIT: this has been fixed by the addition of a lock around the relevant critical section
There are 2 threads that are trying to store 2 different blocks that share some intermediate states (e.g. they both skip some slots from the current head). Consider this sequence of events:
1. Thread 1 checks if state `s` already exists, and seeing that it doesn't, prepares an atomic commit of `(s, s_temporary_flag)`.
2. Thread 2 does the same, but also gets as far as committing the state txn, finishing the processing of its block, and _deleting_ the temporary flag.
3. Thread 1 is (finally) scheduled again, and marks `s` as temporary with its transaction.
4.
a) The process is killed, or thread 1's block fails verification and the temp flag is not deleted. This is a permanent failure! Any attempt to load state `s` will fail... hope it isn't on the main chain! Alternatively (4b) happens...
b) Thread 1 finishes, and re-deletes the temporary flag. In this case the failure is transient, state `s` will disappear temporarily, but will come back once thread 1 finishes running.
I _hope_ that steps 1-3 only happen very rarely, and 4a even more rarely. It's hard to know
This once again begs the question of why we're using LevelDB (#483), when it clearly doesn't care about atomicity! A ham-fisted fix would be to wrap the hot and cold DBs in locks, which would bring us closer to how other DBs handle read-write transactions. E.g. [LMDB only allows one R/W transaction at a time](https://docs.rs/lmdb/0.8.0/lmdb/struct.Environment.html#method.begin_rw_txn).
### Race 2: Temporary state returned from `get_state`
I don't think this race really matters, but in `load_hot_state`, if another thread stores a state between when we call `load_state_temporary_flag` and when we call `load_hot_state_summary`, then we could end up returning that state even though it's only a temporary state. I can't think of any case where this would be relevant, and I suspect if it did come up, it would be safe/recoverable (having data is safer than _not_ having data).
This could be fixed by using a LevelDB read snapshot, but that would require substantial changes to how we read all our values, so I don't think it's worth it right now.
2020-10-23 01:27:51 +00:00
|
|
|
#[test]
|
|
|
|
fn garbage_collect_temp_states_from_failed_block() {
|
|
|
|
let db_path = tempdir().unwrap();
|
|
|
|
let store = get_store(&db_path);
|
|
|
|
let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);
|
|
|
|
let slots_per_epoch = E::slots_per_epoch();
|
|
|
|
|
|
|
|
let genesis_state = harness.get_current_state();
|
|
|
|
let block_slot = Slot::new(2 * slots_per_epoch);
|
2021-07-09 06:15:32 +00:00
|
|
|
let (signed_block, state) = harness.make_block(genesis_state, block_slot);
|
|
|
|
|
|
|
|
let (mut block, _) = signed_block.deconstruct();
|
Implement database temp states to reduce memory usage (#1798)
## Issue Addressed
Closes #800
Closes #1713
## Proposed Changes
Implement the temporary state storage algorithm described in #800. Specifically:
* Add `DBColumn::BeaconStateTemporary`, for storing 0-length temporary marker values.
* Store intermediate states immediately as they are created, marked temporary. Delete the temporary flag if the block is processed successfully.
* Add a garbage collection process to delete leftover temporary states on start-up.
* Bump the database schema version to 2 so that a DB with temporary states can't accidentally be used with older versions of the software. The auto-migration is a no-op, but puts in place some infra that we can use for future migrations (e.g. #1784)
## Additional Info
There are two known race conditions, one potentially causing permanent faults (hopefully rare), and the other insignificant.
### Race 1: Permanent state marked temporary
EDIT: this has been fixed by the addition of a lock around the relevant critical section
There are 2 threads that are trying to store 2 different blocks that share some intermediate states (e.g. they both skip some slots from the current head). Consider this sequence of events:
1. Thread 1 checks if state `s` already exists, and seeing that it doesn't, prepares an atomic commit of `(s, s_temporary_flag)`.
2. Thread 2 does the same, but also gets as far as committing the state txn, finishing the processing of its block, and _deleting_ the temporary flag.
3. Thread 1 is (finally) scheduled again, and marks `s` as temporary with its transaction.
4.
a) The process is killed, or thread 1's block fails verification and the temp flag is not deleted. This is a permanent failure! Any attempt to load state `s` will fail... hope it isn't on the main chain! Alternatively (4b) happens...
b) Thread 1 finishes, and re-deletes the temporary flag. In this case the failure is transient, state `s` will disappear temporarily, but will come back once thread 1 finishes running.
I _hope_ that steps 1-3 only happen very rarely, and 4a even more rarely. It's hard to know
This once again begs the question of why we're using LevelDB (#483), when it clearly doesn't care about atomicity! A ham-fisted fix would be to wrap the hot and cold DBs in locks, which would bring us closer to how other DBs handle read-write transactions. E.g. [LMDB only allows one R/W transaction at a time](https://docs.rs/lmdb/0.8.0/lmdb/struct.Environment.html#method.begin_rw_txn).
### Race 2: Temporary state returned from `get_state`
I don't think this race really matters, but in `load_hot_state`, if another thread stores a state between when we call `load_state_temporary_flag` and when we call `load_hot_state_summary`, then we could end up returning that state even though it's only a temporary state. I can't think of any case where this would be relevant, and I suspect if it did come up, it would be safe/recoverable (having data is safer than _not_ having data).
This could be fixed by using a LevelDB read snapshot, but that would require substantial changes to how we read all our values, so I don't think it's worth it right now.
2020-10-23 01:27:51 +00:00
|
|
|
|
|
|
|
// Mutate the block to make it invalid, and re-sign it.
|
2021-07-09 06:15:32 +00:00
|
|
|
*block.state_root_mut() = Hash256::repeat_byte(0xff);
|
|
|
|
let proposer_index = block.proposer_index() as usize;
|
|
|
|
let block = block.sign(
|
Implement database temp states to reduce memory usage (#1798)
## Issue Addressed
Closes #800
Closes #1713
## Proposed Changes
Implement the temporary state storage algorithm described in #800. Specifically:
* Add `DBColumn::BeaconStateTemporary`, for storing 0-length temporary marker values.
* Store intermediate states immediately as they are created, marked temporary. Delete the temporary flag if the block is processed successfully.
* Add a garbage collection process to delete leftover temporary states on start-up.
* Bump the database schema version to 2 so that a DB with temporary states can't accidentally be used with older versions of the software. The auto-migration is a no-op, but puts in place some infra that we can use for future migrations (e.g. #1784)
## Additional Info
There are two known race conditions, one potentially causing permanent faults (hopefully rare), and the other insignificant.
### Race 1: Permanent state marked temporary
EDIT: this has been fixed by the addition of a lock around the relevant critical section
There are 2 threads that are trying to store 2 different blocks that share some intermediate states (e.g. they both skip some slots from the current head). Consider this sequence of events:
1. Thread 1 checks if state `s` already exists, and seeing that it doesn't, prepares an atomic commit of `(s, s_temporary_flag)`.
2. Thread 2 does the same, but also gets as far as committing the state txn, finishing the processing of its block, and _deleting_ the temporary flag.
3. Thread 1 is (finally) scheduled again, and marks `s` as temporary with its transaction.
4.
a) The process is killed, or thread 1's block fails verification and the temp flag is not deleted. This is a permanent failure! Any attempt to load state `s` will fail... hope it isn't on the main chain! Alternatively (4b) happens...
b) Thread 1 finishes, and re-deletes the temporary flag. In this case the failure is transient, state `s` will disappear temporarily, but will come back once thread 1 finishes running.
I _hope_ that steps 1-3 only happen very rarely, and 4a even more rarely. It's hard to know for certain, given the timing-dependent nature of the race.
This once again begs the question of why we're using LevelDB (#483), when it clearly doesn't care about atomicity! A ham-fisted fix would be to wrap the hot and cold DBs in locks, which would bring us closer to how other DBs handle read-write transactions. E.g. [LMDB only allows one R/W transaction at a time](https://docs.rs/lmdb/0.8.0/lmdb/struct.Environment.html#method.begin_rw_txn).
### Race 2: Temporary state returned from `get_state`
I don't think this race really matters, but in `load_hot_state`, if another thread stores a state between when we call `load_state_temporary_flag` and when we call `load_hot_state_summary`, then we could end up returning that state even though it's only a temporary state. I can't think of any case where this would be relevant, and I suspect if it did come up, it would be safe/recoverable (having data is safer than _not_ having data).
This could be fixed by using a LevelDB read snapshot, but that would require substantial changes to how we read all our values, so I don't think it's worth it right now.
2020-10-23 01:27:51 +00:00
|
|
|
&harness.validator_keypairs[proposer_index].sk,
|
2021-07-09 06:15:32 +00:00
|
|
|
&state.fork(),
|
|
|
|
state.genesis_validators_root(),
|
Implement database temp states to reduce memory usage (#1798)
## Issue Addressed
Closes #800
Closes #1713
## Proposed Changes
Implement the temporary state storage algorithm described in #800. Specifically:
* Add `DBColumn::BeaconStateTemporary`, for storing 0-length temporary marker values.
* Store intermediate states immediately as they are created, marked temporary. Delete the temporary flag if the block is processed successfully.
* Add a garbage collection process to delete leftover temporary states on start-up.
* Bump the database schema version to 2 so that a DB with temporary states can't accidentally be used with older versions of the software. The auto-migration is a no-op, but puts in place some infra that we can use for future migrations (e.g. #1784)
## Additional Info
There are two known race conditions, one potentially causing permanent faults (hopefully rare), and the other insignificant.
### Race 1: Permanent state marked temporary
EDIT: this has been fixed by the addition of a lock around the relevant critical section
There are 2 threads that are trying to store 2 different blocks that share some intermediate states (e.g. they both skip some slots from the current head). Consider this sequence of events:
1. Thread 1 checks if state `s` already exists, and seeing that it doesn't, prepares an atomic commit of `(s, s_temporary_flag)`.
2. Thread 2 does the same, but also gets as far as committing the state txn, finishing the processing of its block, and _deleting_ the temporary flag.
3. Thread 1 is (finally) scheduled again, and marks `s` as temporary with its transaction.
4.
a) The process is killed, or thread 1's block fails verification and the temp flag is not deleted. This is a permanent failure! Any attempt to load state `s` will fail... hope it isn't on the main chain! Alternatively (4b) happens...
b) Thread 1 finishes, and re-deletes the temporary flag. In this case the failure is transient, state `s` will disappear temporarily, but will come back once thread 1 finishes running.
I _hope_ that steps 1-3 only happen very rarely, and 4a even more rarely. It's hard to know for certain, given the timing-dependent nature of the race.
This once again begs the question of why we're using LevelDB (#483), when it clearly doesn't care about atomicity! A ham-fisted fix would be to wrap the hot and cold DBs in locks, which would bring us closer to how other DBs handle read-write transactions. E.g. [LMDB only allows one R/W transaction at a time](https://docs.rs/lmdb/0.8.0/lmdb/struct.Environment.html#method.begin_rw_txn).
### Race 2: Temporary state returned from `get_state`
I don't think this race really matters, but in `load_hot_state`, if another thread stores a state between when we call `load_state_temporary_flag` and when we call `load_hot_state_summary`, then we could end up returning that state even though it's only a temporary state. I can't think of any case where this would be relevant, and I suspect if it did come up, it would be safe/recoverable (having data is safer than _not_ having data).
This could be fixed by using a LevelDB read snapshot, but that would require substantial changes to how we read all our values, so I don't think it's worth it right now.
2020-10-23 01:27:51 +00:00
|
|
|
&harness.spec,
|
|
|
|
);
|
|
|
|
|
|
|
|
// The block should be rejected, but should store a bunch of temporary states.
|
|
|
|
harness.set_current_slot(block_slot);
|
|
|
|
harness.process_block_result(block).unwrap_err();
|
|
|
|
|
|
|
|
assert_eq!(
|
|
|
|
store.iter_temporary_state_roots().count(),
|
|
|
|
block_slot.as_usize() - 1
|
|
|
|
);
|
|
|
|
|
|
|
|
drop(harness);
|
|
|
|
drop(store);
|
|
|
|
|
|
|
|
// On startup, the store should garbage collect all the temporary states.
|
|
|
|
let store = get_store(&db_path);
|
|
|
|
assert_eq!(store.iter_temporary_state_roots().count(), 0);
|
|
|
|
}
|
|
|
|
|
2021-09-22 00:37:28 +00:00
|
|
|
#[test]
|
|
|
|
fn weak_subjectivity_sync() {
|
|
|
|
// Build an initial chain on one harness, representing a synced node with full history.
|
|
|
|
let num_initial_blocks = E::slots_per_epoch() * 11;
|
|
|
|
let num_final_blocks = E::slots_per_epoch() * 2;
|
|
|
|
|
|
|
|
let temp1 = tempdir().unwrap();
|
|
|
|
let full_store = get_store(&temp1);
|
|
|
|
let harness = get_harness(full_store.clone(), LOW_VALIDATOR_COUNT);
|
|
|
|
|
|
|
|
harness.extend_chain(
|
|
|
|
num_initial_blocks as usize,
|
|
|
|
BlockStrategy::OnCanonicalHead,
|
|
|
|
AttestationStrategy::AllValidators,
|
|
|
|
);
|
|
|
|
|
|
|
|
let genesis_state = full_store
|
|
|
|
.get_state(&harness.chain.genesis_state_root, Some(Slot::new(0)))
|
|
|
|
.unwrap()
|
|
|
|
.unwrap();
|
|
|
|
let wss_checkpoint = harness.chain.head_info().unwrap().finalized_checkpoint;
|
|
|
|
let wss_block = harness.get_block(wss_checkpoint.root.into()).unwrap();
|
|
|
|
let wss_state = full_store
|
|
|
|
.get_state(&wss_block.state_root(), None)
|
|
|
|
.unwrap()
|
|
|
|
.unwrap();
|
|
|
|
let wss_slot = wss_block.slot();
|
|
|
|
|
|
|
|
// Add more blocks that advance finalization further.
|
|
|
|
harness.advance_slot();
|
|
|
|
harness.extend_chain(
|
|
|
|
num_final_blocks as usize,
|
|
|
|
BlockStrategy::OnCanonicalHead,
|
|
|
|
AttestationStrategy::AllValidators,
|
|
|
|
);
|
|
|
|
|
|
|
|
let (shutdown_tx, _shutdown_rx) = futures::channel::mpsc::channel(1);
|
|
|
|
let log = test_logger();
|
|
|
|
let temp2 = tempdir().unwrap();
|
|
|
|
let store = get_store(&temp2);
|
|
|
|
|
|
|
|
// Initialise a new beacon chain from the finalized checkpoint
|
|
|
|
let beacon_chain = BeaconChainBuilder::new(MinimalEthSpec)
|
|
|
|
.store(store.clone())
|
|
|
|
.custom_spec(test_spec::<E>())
|
|
|
|
.weak_subjectivity_state(wss_state, wss_block.clone(), genesis_state)
|
|
|
|
.unwrap()
|
|
|
|
.logger(log.clone())
|
|
|
|
.store_migrator_config(MigratorConfig::default().blocking())
|
|
|
|
.dummy_eth1_backend()
|
|
|
|
.expect("should build dummy backend")
|
|
|
|
.testing_slot_clock(HARNESS_SLOT_TIME)
|
|
|
|
.expect("should configure testing slot clock")
|
|
|
|
.shutdown_sender(shutdown_tx)
|
|
|
|
.chain_config(ChainConfig::default())
|
|
|
|
.event_handler(Some(ServerSentEventHandler::new_with_capacity(
|
|
|
|
log.clone(),
|
|
|
|
1,
|
|
|
|
)))
|
|
|
|
.monitor_validators(true, vec![], log)
|
|
|
|
.build()
|
|
|
|
.expect("should build");
|
|
|
|
|
|
|
|
// Apply blocks forward to reach head.
|
|
|
|
let chain_dump = harness.chain.chain_dump().unwrap();
|
|
|
|
let new_blocks = &chain_dump[wss_slot.as_usize() + 1..];
|
|
|
|
|
|
|
|
assert_eq!(new_blocks[0].beacon_block.slot(), wss_slot + 1);
|
|
|
|
|
|
|
|
for snapshot in new_blocks {
|
|
|
|
let block = &snapshot.beacon_block;
|
|
|
|
beacon_chain.slot_clock.set_slot(block.slot().as_u64());
|
|
|
|
beacon_chain.process_block(block.clone()).unwrap();
|
|
|
|
beacon_chain.fork_choice().unwrap();
|
|
|
|
|
|
|
|
// Check that the new block's state can be loaded correctly.
|
|
|
|
let state_root = block.state_root();
|
|
|
|
let mut state = beacon_chain
|
|
|
|
.store
|
|
|
|
.get_state(&state_root, Some(block.slot()))
|
|
|
|
.unwrap()
|
|
|
|
.unwrap();
|
|
|
|
assert_eq!(state.update_tree_hash_cache().unwrap(), state_root);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Forwards iterator from 0 should fail as we lack blocks.
|
|
|
|
assert!(matches!(
|
|
|
|
beacon_chain.forwards_iter_block_roots(Slot::new(0)),
|
|
|
|
Err(BeaconChainError::HistoricalBlockError(
|
|
|
|
HistoricalBlockError::BlockOutOfRange { .. }
|
|
|
|
))
|
|
|
|
));
|
|
|
|
|
|
|
|
// Simulate processing of a `StatusMessage` with an older finalized epoch by calling
|
|
|
|
// `block_root_at_slot` with an old slot for which we don't know the block root. It should
|
|
|
|
// return `None` rather than erroring.
|
|
|
|
assert_eq!(
|
|
|
|
beacon_chain
|
|
|
|
.block_root_at_slot(Slot::new(1), WhenSlotSkipped::None)
|
|
|
|
.unwrap(),
|
|
|
|
None
|
|
|
|
);
|
|
|
|
|
|
|
|
// Simulate querying the API for a historic state that is unknown. It should also return
|
|
|
|
// `None` rather than erroring.
|
|
|
|
assert_eq!(beacon_chain.state_root_at_slot(Slot::new(1)).unwrap(), None);
|
|
|
|
|
|
|
|
// Supply blocks backwards to reach genesis. Omit the genesis block to check genesis handling.
|
|
|
|
let historical_blocks = chain_dump[..wss_block.slot().as_usize()]
|
|
|
|
.iter()
|
|
|
|
.filter(|s| s.beacon_block.slot() != 0)
|
|
|
|
.map(|s| s.beacon_block.clone())
|
|
|
|
.collect::<Vec<_>>();
|
|
|
|
beacon_chain
|
|
|
|
.import_historical_block_batch(&historical_blocks)
|
|
|
|
.unwrap();
|
|
|
|
assert_eq!(beacon_chain.store.get_oldest_block_slot(), 0);
|
|
|
|
|
|
|
|
// Resupplying the blocks should not fail, they can be safely ignored.
|
|
|
|
beacon_chain
|
|
|
|
.import_historical_block_batch(&historical_blocks)
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
// The forwards iterator should now match the original chain
|
|
|
|
let forwards = beacon_chain
|
|
|
|
.forwards_iter_block_roots(Slot::new(0))
|
|
|
|
.unwrap()
|
|
|
|
.map(Result::unwrap)
|
|
|
|
.collect::<Vec<_>>();
|
|
|
|
let expected = harness
|
|
|
|
.chain
|
|
|
|
.forwards_iter_block_roots(Slot::new(0))
|
|
|
|
.unwrap()
|
|
|
|
.map(Result::unwrap)
|
|
|
|
.collect::<Vec<_>>();
|
|
|
|
assert_eq!(forwards, expected);
|
|
|
|
|
|
|
|
// All blocks can be loaded.
|
|
|
|
for (block_root, slot) in beacon_chain
|
|
|
|
.forwards_iter_block_roots(Slot::new(0))
|
|
|
|
.unwrap()
|
|
|
|
.map(Result::unwrap)
|
|
|
|
{
|
|
|
|
let block = store.get_block(&block_root).unwrap().unwrap();
|
|
|
|
assert_eq!(block.slot(), slot);
|
|
|
|
}
|
|
|
|
|
|
|
|
// All states from the oldest state slot can be loaded.
|
|
|
|
let (_, oldest_state_slot) = store.get_historic_state_limits();
|
|
|
|
for (state_root, slot) in beacon_chain
|
|
|
|
.forwards_iter_state_roots(oldest_state_slot)
|
|
|
|
.unwrap()
|
|
|
|
.map(Result::unwrap)
|
|
|
|
{
|
|
|
|
let state = store.get_state(&state_root, Some(slot)).unwrap().unwrap();
|
|
|
|
assert_eq!(state.slot(), slot);
|
|
|
|
assert_eq!(state.canonical_root(), state_root);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Anchor slot is still set to the starting slot.
|
|
|
|
assert_eq!(store.get_anchor_slot(), Some(wss_slot));
|
|
|
|
|
|
|
|
// Reconstruct states.
|
|
|
|
store.clone().reconstruct_historic_states().unwrap();
|
|
|
|
assert_eq!(store.get_anchor_slot(), None);
|
|
|
|
}
|
|
|
|
|
2021-07-09 06:15:32 +00:00
|
|
|
#[test]
|
|
|
|
fn finalizes_after_resuming_from_db() {
|
|
|
|
let validator_count = 16;
|
|
|
|
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 8;
|
|
|
|
let first_half = num_blocks_produced / 2;
|
|
|
|
|
|
|
|
let db_path = tempdir().unwrap();
|
|
|
|
let store = get_store(&db_path);
|
|
|
|
|
|
|
|
let harness = BeaconChainHarness::new_with_disk_store(
|
|
|
|
MinimalEthSpec,
|
|
|
|
None,
|
|
|
|
store.clone(),
|
|
|
|
KEYPAIRS[0..validator_count].to_vec(),
|
|
|
|
);
|
|
|
|
|
|
|
|
harness.advance_slot();
|
|
|
|
|
|
|
|
harness.extend_chain(
|
|
|
|
first_half as usize,
|
|
|
|
BlockStrategy::OnCanonicalHead,
|
|
|
|
AttestationStrategy::AllValidators,
|
|
|
|
);
|
|
|
|
|
|
|
|
assert!(
|
|
|
|
harness
|
|
|
|
.chain
|
|
|
|
.head()
|
|
|
|
.expect("should read head")
|
|
|
|
.beacon_state
|
|
|
|
.finalized_checkpoint()
|
|
|
|
.epoch
|
|
|
|
> 0,
|
|
|
|
"the chain should have already finalized"
|
|
|
|
);
|
|
|
|
|
|
|
|
let latest_slot = harness.chain.slot().expect("should have a slot");
|
|
|
|
|
|
|
|
harness
|
|
|
|
.chain
|
|
|
|
.persist_head_and_fork_choice()
|
|
|
|
.expect("should persist the head and fork choice");
|
|
|
|
harness
|
|
|
|
.chain
|
|
|
|
.persist_op_pool()
|
|
|
|
.expect("should persist the op pool");
|
|
|
|
harness
|
|
|
|
.chain
|
|
|
|
.persist_eth1_cache()
|
|
|
|
.expect("should persist the eth1 cache");
|
|
|
|
|
|
|
|
let original_chain = harness.chain;
|
|
|
|
|
|
|
|
let resumed_harness = BeaconChainHarness::resume_from_disk_store(
|
|
|
|
MinimalEthSpec,
|
|
|
|
None,
|
|
|
|
store,
|
|
|
|
KEYPAIRS[0..validator_count].to_vec(),
|
|
|
|
);
|
|
|
|
|
|
|
|
assert_chains_pretty_much_the_same(&original_chain, &resumed_harness.chain);
|
|
|
|
|
|
|
|
// Set the slot clock of the resumed harness to be in the slot following the previous harness.
|
|
|
|
//
|
|
|
|
// This allows us to produce the block at the next slot.
|
|
|
|
resumed_harness
|
|
|
|
.chain
|
|
|
|
.slot_clock
|
|
|
|
.set_slot(latest_slot.as_u64() + 1);
|
|
|
|
|
|
|
|
resumed_harness.extend_chain(
|
|
|
|
(num_blocks_produced - first_half) as usize,
|
|
|
|
BlockStrategy::OnCanonicalHead,
|
|
|
|
AttestationStrategy::AllValidators,
|
|
|
|
);
|
|
|
|
|
|
|
|
let state = &resumed_harness
|
|
|
|
.chain
|
|
|
|
.head()
|
|
|
|
.expect("should read head")
|
|
|
|
.beacon_state;
|
|
|
|
assert_eq!(
|
|
|
|
state.slot(),
|
|
|
|
num_blocks_produced,
|
|
|
|
"head should be at the current slot"
|
|
|
|
);
|
|
|
|
assert_eq!(
|
|
|
|
state.current_epoch(),
|
|
|
|
num_blocks_produced / MinimalEthSpec::slots_per_epoch(),
|
|
|
|
"head should be at the expected epoch"
|
|
|
|
);
|
|
|
|
assert_eq!(
|
|
|
|
state.current_justified_checkpoint().epoch,
|
|
|
|
state.current_epoch() - 1,
|
|
|
|
"the head should be justified one behind the current epoch"
|
|
|
|
);
|
|
|
|
assert_eq!(
|
|
|
|
state.finalized_checkpoint().epoch,
|
|
|
|
state.current_epoch() - 2,
|
|
|
|
"the head should be finalized two behind the current epoch"
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
2021-08-30 06:41:31 +00:00
|
|
|
#[test]
|
|
|
|
fn revert_minority_fork_on_resume() {
|
|
|
|
let validator_count = 16;
|
|
|
|
let slots_per_epoch = MinimalEthSpec::slots_per_epoch();
|
|
|
|
|
|
|
|
let fork_epoch = Epoch::new(4);
|
|
|
|
let fork_slot = fork_epoch.start_slot(slots_per_epoch);
|
|
|
|
let initial_blocks = slots_per_epoch * fork_epoch.as_u64() - 1;
|
|
|
|
let post_fork_blocks = slots_per_epoch * 3;
|
|
|
|
|
|
|
|
let mut spec1 = MinimalEthSpec::default_spec();
|
|
|
|
spec1.altair_fork_epoch = None;
|
|
|
|
let mut spec2 = MinimalEthSpec::default_spec();
|
|
|
|
spec2.altair_fork_epoch = Some(fork_epoch);
|
|
|
|
|
|
|
|
let all_validators = (0..validator_count).collect::<Vec<usize>>();
|
|
|
|
|
|
|
|
// Chain with no fork epoch configured.
|
|
|
|
let db_path1 = tempdir().unwrap();
|
|
|
|
let store1 = get_store_with_spec(&db_path1, spec1.clone());
|
|
|
|
let harness1 = BeaconChainHarness::new_with_disk_store(
|
|
|
|
MinimalEthSpec,
|
|
|
|
Some(spec1),
|
|
|
|
store1,
|
|
|
|
KEYPAIRS[0..validator_count].to_vec(),
|
|
|
|
);
|
|
|
|
|
|
|
|
// Chain with fork epoch configured.
|
|
|
|
let db_path2 = tempdir().unwrap();
|
|
|
|
let store2 = get_store_with_spec(&db_path2, spec2.clone());
|
|
|
|
let harness2 = BeaconChainHarness::new_with_disk_store(
|
|
|
|
MinimalEthSpec,
|
|
|
|
Some(spec2.clone()),
|
|
|
|
store2,
|
|
|
|
KEYPAIRS[0..validator_count].to_vec(),
|
|
|
|
);
|
|
|
|
|
|
|
|
// Apply the same blocks to both chains initially.
|
|
|
|
let mut state = harness1.get_current_state();
|
|
|
|
let mut block_root = harness1.chain.genesis_block_root;
|
|
|
|
for slot in (1..=initial_blocks).map(Slot::new) {
|
|
|
|
let state_root = state.update_tree_hash_cache().unwrap();
|
|
|
|
|
|
|
|
let attestations = harness1.make_attestations(
|
|
|
|
&all_validators,
|
|
|
|
&state,
|
|
|
|
state_root,
|
|
|
|
block_root.into(),
|
|
|
|
slot,
|
|
|
|
);
|
|
|
|
harness1.set_current_slot(slot);
|
|
|
|
harness2.set_current_slot(slot);
|
|
|
|
harness1.process_attestations(attestations.clone());
|
|
|
|
harness2.process_attestations(attestations);
|
|
|
|
|
|
|
|
let (block, new_state) = harness1.make_block(state, slot);
|
|
|
|
|
|
|
|
harness1.process_block(slot, block.clone()).unwrap();
|
|
|
|
harness2.process_block(slot, block.clone()).unwrap();
|
|
|
|
|
|
|
|
state = new_state;
|
|
|
|
block_root = block.canonical_root();
|
|
|
|
}
|
|
|
|
|
|
|
|
assert_eq!(harness1.chain.head_info().unwrap().slot, fork_slot - 1);
|
|
|
|
assert_eq!(harness2.chain.head_info().unwrap().slot, fork_slot - 1);
|
|
|
|
|
|
|
|
// Fork the two chains.
|
|
|
|
let mut state1 = state.clone();
|
|
|
|
let mut state2 = state.clone();
|
|
|
|
|
|
|
|
let mut majority_blocks = vec![];
|
|
|
|
|
|
|
|
for i in 0..post_fork_blocks {
|
|
|
|
let slot = fork_slot + i;
|
|
|
|
|
|
|
|
// Attestations on majority chain.
|
|
|
|
let state_root = state.update_tree_hash_cache().unwrap();
|
|
|
|
|
|
|
|
let attestations = harness2.make_attestations(
|
|
|
|
&all_validators,
|
|
|
|
&state2,
|
|
|
|
state_root,
|
|
|
|
block_root.into(),
|
|
|
|
slot,
|
|
|
|
);
|
|
|
|
harness2.set_current_slot(slot);
|
|
|
|
harness2.process_attestations(attestations);
|
|
|
|
|
|
|
|
// Minority chain block (no attesters).
|
|
|
|
let (block1, new_state1) = harness1.make_block(state1, slot);
|
|
|
|
harness1.process_block(slot, block1).unwrap();
|
|
|
|
state1 = new_state1;
|
|
|
|
|
|
|
|
// Majority chain block (all attesters).
|
|
|
|
let (block2, new_state2) = harness2.make_block(state2, slot);
|
|
|
|
harness2.process_block(slot, block2.clone()).unwrap();
|
|
|
|
|
|
|
|
state2 = new_state2;
|
|
|
|
block_root = block2.canonical_root();
|
|
|
|
|
|
|
|
majority_blocks.push(block2);
|
|
|
|
}
|
|
|
|
|
|
|
|
let end_slot = fork_slot + post_fork_blocks - 1;
|
|
|
|
assert_eq!(harness1.chain.head_info().unwrap().slot, end_slot);
|
|
|
|
assert_eq!(harness2.chain.head_info().unwrap().slot, end_slot);
|
|
|
|
|
|
|
|
// Resume from disk with the hard-fork activated: this should revert the post-fork blocks.
|
|
|
|
// We have to do some hackery with the `slot_clock` so that the correct slot is set when
|
|
|
|
// the beacon chain builder loads the head block.
|
|
|
|
drop(harness1);
|
|
|
|
let resume_store = get_store_with_spec(&db_path1, spec2.clone());
|
|
|
|
let resumed_harness = BeaconChainHarness::new_with_mutator(
|
|
|
|
MinimalEthSpec,
|
|
|
|
spec2,
|
|
|
|
resume_store,
|
|
|
|
KEYPAIRS[0..validator_count].to_vec(),
|
|
|
|
ChainConfig::default(),
|
|
|
|
|mut builder| {
|
|
|
|
builder = builder
|
|
|
|
.resume_from_db()
|
|
|
|
.unwrap()
|
|
|
|
.testing_slot_clock(HARNESS_SLOT_TIME)
|
|
|
|
.unwrap();
|
|
|
|
builder
|
|
|
|
.get_slot_clock()
|
|
|
|
.unwrap()
|
|
|
|
.set_slot(end_slot.as_u64());
|
|
|
|
builder
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
|
|
|
// Head should now be just before the fork.
|
|
|
|
resumed_harness.chain.fork_choice().unwrap();
|
|
|
|
let head = resumed_harness.chain.head_info().unwrap();
|
|
|
|
assert_eq!(head.slot, fork_slot - 1);
|
|
|
|
|
|
|
|
// Head track should know the canonical head and the rogue head.
|
|
|
|
assert_eq!(resumed_harness.chain.heads().len(), 2);
|
|
|
|
assert!(resumed_harness.chain.knows_head(&head.block_root.into()));
|
|
|
|
|
|
|
|
// Apply blocks from the majority chain and trigger finalization.
|
|
|
|
let initial_split_slot = resumed_harness.chain.store.get_split_slot();
|
|
|
|
for block in &majority_blocks {
|
|
|
|
resumed_harness.process_block_result(block.clone()).unwrap();
|
|
|
|
|
|
|
|
// The canonical head should be the block from the majority chain.
|
|
|
|
resumed_harness.chain.fork_choice().unwrap();
|
|
|
|
let head_info = resumed_harness.chain.head_info().unwrap();
|
|
|
|
assert_eq!(head_info.slot, block.slot());
|
|
|
|
assert_eq!(head_info.block_root, block.canonical_root());
|
|
|
|
}
|
|
|
|
let advanced_split_slot = resumed_harness.chain.store.get_split_slot();
|
|
|
|
|
|
|
|
// Check that the migration ran successfully.
|
|
|
|
assert!(advanced_split_slot > initial_split_slot);
|
|
|
|
|
|
|
|
// Check that there is only a single head now matching harness2 (the minority chain is gone).
|
|
|
|
let heads = resumed_harness.chain.heads();
|
|
|
|
assert_eq!(heads, harness2.chain.heads());
|
|
|
|
assert_eq!(heads.len(), 1);
|
|
|
|
}
|
|
|
|
|
2021-07-09 06:15:32 +00:00
|
|
|
/// Checks that two chains are the same, for the purpose of these tests.
|
|
|
|
///
|
|
|
|
/// Several fields that are hard/impossible to check are ignored (e.g., the store).
|
|
|
|
fn assert_chains_pretty_much_the_same<T: BeaconChainTypes>(a: &BeaconChain<T>, b: &BeaconChain<T>) {
|
|
|
|
assert_eq!(a.spec, b.spec, "spec should be equal");
|
|
|
|
assert_eq!(a.op_pool, b.op_pool, "op_pool should be equal");
|
|
|
|
assert_eq!(
|
|
|
|
a.head().unwrap(),
|
|
|
|
b.head().unwrap(),
|
|
|
|
"head() should be equal"
|
|
|
|
);
|
|
|
|
assert_eq!(a.heads(), b.heads(), "heads() should be equal");
|
|
|
|
assert_eq!(
|
|
|
|
a.genesis_block_root, b.genesis_block_root,
|
|
|
|
"genesis_block_root should be equal"
|
|
|
|
);
|
|
|
|
|
|
|
|
let slot = a.slot().unwrap();
|
|
|
|
assert!(
|
|
|
|
a.fork_choice.write().get_head(slot).unwrap()
|
|
|
|
== b.fork_choice.write().get_head(slot).unwrap(),
|
|
|
|
"fork_choice heads should be equal"
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
2019-11-26 23:54:46 +00:00
|
|
|
/// Check that the head state's slot matches `expected_slot`.
|
|
|
|
fn check_slot(harness: &TestHarness, expected_slot: u64) {
|
2020-01-06 06:30:37 +00:00
|
|
|
let state = &harness.chain.head().expect("should get head").beacon_state;
|
2019-11-26 23:54:46 +00:00
|
|
|
|
|
|
|
assert_eq!(
|
2021-07-09 06:15:32 +00:00
|
|
|
state.slot(),
|
|
|
|
expected_slot,
|
2019-11-26 23:54:46 +00:00
|
|
|
"head should be at the current slot"
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Check that the chain has finalized under best-case assumptions, and check the head slot.
|
|
|
|
fn check_finalization(harness: &TestHarness, expected_slot: u64) {
|
2020-01-06 06:30:37 +00:00
|
|
|
let state = &harness.chain.head().expect("should get head").beacon_state;
|
2019-11-26 23:54:46 +00:00
|
|
|
|
|
|
|
check_slot(harness, expected_slot);
|
|
|
|
|
|
|
|
assert_eq!(
|
2021-07-09 06:15:32 +00:00
|
|
|
state.current_justified_checkpoint().epoch,
|
2019-11-26 23:54:46 +00:00
|
|
|
state.current_epoch() - 1,
|
|
|
|
"the head should be justified one behind the current epoch"
|
|
|
|
);
|
|
|
|
assert_eq!(
|
2021-07-09 06:15:32 +00:00
|
|
|
state.finalized_checkpoint().epoch,
|
2019-11-26 23:54:46 +00:00
|
|
|
state.current_epoch() - 2,
|
|
|
|
"the head should be finalized two behind the current epoch"
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
2020-05-31 22:13:49 +00:00
|
|
|
/// Check that the HotColdDB's split_slot is equal to the start slot of the last finalized epoch.
|
2020-06-16 01:34:04 +00:00
|
|
|
fn check_split_slot(harness: &TestHarness, store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>) {
|
2019-11-26 23:54:46 +00:00
|
|
|
let split_slot = store.get_split_slot();
|
|
|
|
assert_eq!(
|
|
|
|
harness
|
|
|
|
.chain
|
|
|
|
.head()
|
2020-01-06 06:30:37 +00:00
|
|
|
.expect("should get head")
|
2019-11-26 23:54:46 +00:00
|
|
|
.beacon_state
|
2021-07-09 06:15:32 +00:00
|
|
|
.finalized_checkpoint()
|
2019-11-26 23:54:46 +00:00
|
|
|
.epoch
|
|
|
|
.start_slot(E::slots_per_epoch()),
|
|
|
|
split_slot
|
|
|
|
);
|
|
|
|
assert_ne!(split_slot, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Check that all the states in a chain dump have the correct tree hash.
|
|
|
|
fn check_chain_dump(harness: &TestHarness, expected_len: u64) {
|
|
|
|
let chain_dump = harness.chain.chain_dump().unwrap();
|
|
|
|
|
|
|
|
assert_eq!(chain_dump.len() as u64, expected_len);
|
|
|
|
|
2019-12-06 07:52:11 +00:00
|
|
|
for checkpoint in &chain_dump {
|
2019-12-06 03:29:06 +00:00
|
|
|
// Check that the tree hash of the stored state is as expected
|
2019-11-26 23:54:46 +00:00
|
|
|
assert_eq!(
|
2021-02-15 07:17:52 +00:00
|
|
|
checkpoint.beacon_state_root(),
|
2020-03-04 21:07:27 +00:00
|
|
|
checkpoint.beacon_state.tree_hash_root(),
|
2019-11-26 23:54:46 +00:00
|
|
|
"tree hash of stored state is incorrect"
|
|
|
|
);
|
2019-12-06 03:29:06 +00:00
|
|
|
|
|
|
|
// Check that looking up the state root with no slot hint succeeds.
|
|
|
|
// This tests the state root -> slot mapping.
|
|
|
|
assert_eq!(
|
|
|
|
harness
|
|
|
|
.chain
|
|
|
|
.store
|
2021-02-15 07:17:52 +00:00
|
|
|
.get_state(&checkpoint.beacon_state_root(), None)
|
2019-12-06 03:29:06 +00:00
|
|
|
.expect("no error")
|
|
|
|
.expect("state exists")
|
2021-07-09 06:15:32 +00:00
|
|
|
.slot(),
|
|
|
|
checkpoint.beacon_state.slot()
|
2019-12-06 03:29:06 +00:00
|
|
|
);
|
2019-11-26 23:54:46 +00:00
|
|
|
}
|
2019-12-06 07:52:11 +00:00
|
|
|
|
|
|
|
// Check the forwards block roots iterator against the chain dump
|
|
|
|
let chain_dump_block_roots = chain_dump
|
|
|
|
.iter()
|
2020-02-10 23:19:36 +00:00
|
|
|
.map(|checkpoint| (checkpoint.beacon_block_root, checkpoint.beacon_block.slot()))
|
2019-12-06 07:52:11 +00:00
|
|
|
.collect::<Vec<_>>();
|
|
|
|
|
2021-07-06 02:38:53 +00:00
|
|
|
let mut forward_block_roots = harness
|
|
|
|
.chain
|
|
|
|
.forwards_iter_block_roots(Slot::new(0))
|
|
|
|
.expect("should get iter")
|
|
|
|
.map(Result::unwrap)
|
|
|
|
.collect::<Vec<_>>();
|
2019-12-06 07:52:11 +00:00
|
|
|
|
|
|
|
// Drop the block roots for skipped slots.
|
|
|
|
forward_block_roots.dedup_by_key(|(block_root, _)| *block_root);
|
|
|
|
|
|
|
|
for i in 0..std::cmp::max(chain_dump_block_roots.len(), forward_block_roots.len()) {
|
|
|
|
assert_eq!(
|
|
|
|
chain_dump_block_roots[i],
|
|
|
|
forward_block_roots[i],
|
|
|
|
"split slot is {}",
|
|
|
|
harness.chain.store.get_split_slot()
|
|
|
|
);
|
|
|
|
}
|
2019-11-26 23:54:46 +00:00
|
|
|
}
|
2019-12-06 03:29:06 +00:00
|
|
|
|
2020-08-26 00:01:06 +00:00
|
|
|
/// Check that every state from the canonical chain is in the database, and that the
|
|
|
|
/// reverse state and block root iterators reach genesis.
|
2019-12-06 03:29:06 +00:00
|
|
|
fn check_iterators(harness: &TestHarness) {
|
2021-07-06 02:38:53 +00:00
|
|
|
let mut max_slot = None;
|
2020-08-26 00:01:06 +00:00
|
|
|
for (state_root, slot) in harness
|
|
|
|
.chain
|
2021-07-06 02:38:53 +00:00
|
|
|
.forwards_iter_state_roots(Slot::new(0))
|
2020-08-26 00:01:06 +00:00
|
|
|
.expect("should get iter")
|
|
|
|
.map(Result::unwrap)
|
|
|
|
{
|
|
|
|
assert!(
|
|
|
|
harness
|
|
|
|
.chain
|
|
|
|
.store
|
|
|
|
.get_state(&state_root, Some(slot))
|
|
|
|
.unwrap()
|
|
|
|
.is_some(),
|
|
|
|
"state {:?} from canonical chain should be in DB",
|
|
|
|
state_root
|
|
|
|
);
|
2021-07-06 02:38:53 +00:00
|
|
|
max_slot = Some(slot);
|
2020-08-26 00:01:06 +00:00
|
|
|
}
|
2021-07-06 02:38:53 +00:00
|
|
|
// Assert that we reached the head.
|
|
|
|
assert_eq!(
|
|
|
|
max_slot,
|
|
|
|
Some(harness.chain.head_info().expect("should get head").slot)
|
|
|
|
);
|
|
|
|
// Assert that the block root iterator reaches the head.
|
2019-12-06 03:29:06 +00:00
|
|
|
assert_eq!(
|
|
|
|
harness
|
|
|
|
.chain
|
2021-07-06 02:38:53 +00:00
|
|
|
.forwards_iter_block_roots(Slot::new(0))
|
2020-01-06 06:30:37 +00:00
|
|
|
.expect("should get iter")
|
2019-12-06 03:29:06 +00:00
|
|
|
.last()
|
2020-06-09 23:55:44 +00:00
|
|
|
.map(Result::unwrap)
|
2019-12-06 03:29:06 +00:00
|
|
|
.map(|(_, slot)| slot),
|
2021-07-06 02:38:53 +00:00
|
|
|
Some(harness.chain.head_info().expect("should get head").slot)
|
2019-12-06 03:29:06 +00:00
|
|
|
);
|
|
|
|
}
|
2020-04-20 09:59:56 +00:00
|
|
|
|
|
|
|
fn get_finalized_epoch_boundary_blocks(
|
|
|
|
dump: &[BeaconSnapshot<MinimalEthSpec>],
|
|
|
|
) -> HashSet<SignedBeaconBlockHash> {
|
|
|
|
dump.iter()
|
|
|
|
.cloned()
|
2021-07-09 06:15:32 +00:00
|
|
|
.map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint().root.into())
|
2020-04-20 09:59:56 +00:00
|
|
|
.collect()
|
|
|
|
}
|
|
|
|
|
|
|
|
fn get_blocks(dump: &[BeaconSnapshot<MinimalEthSpec>]) -> HashSet<SignedBeaconBlockHash> {
|
|
|
|
dump.iter()
|
|
|
|
.cloned()
|
|
|
|
.map(|checkpoint| checkpoint.beacon_block_root.into())
|
|
|
|
.collect()
|
|
|
|
}
|