Alternative (to BeaconChainHarness) BeaconChain testing API (#1380)

The PR:

* Adds the ability to generate a crucial test scenario that isn't possible with `BeaconChainHarness`: two blocks occupying the same slot (previously, forks necessitated skipping slots):

![image](https://user-images.githubusercontent.com/165678/88195404-4bce3580-cc40-11ea-8c08-b48d2e1d5959.png)

* New testing API: instead of repeatedly calling `add_block()`, you generate a sorted `Vec<Slot>` and leave it up to the framework to generate blocks at those slots (see the first sketch after this list).
* Jumping backwards to an earlier epoch is a hard error, so tests necessarily generate blocks in an epoch-by-epoch manner.
* Configures the test logger so that output is printed to the console if a test fails. The logger also plays well with `--nocapture`, unlike the existing testing framework (see the second sketch after this list).
* Rewrites existing fork pruning tests to use the new API
* Adds a test that triggers finalization at a non-epoch-boundary slot
* Renames `BeaconChainYoke` to `BeaconChainTestingRig` because the former name was too confusing
* Fixes multiple tests (e.g. `block_production_different_shuffling_long`, `delete_blocks_and_states`, `shuffling_compatible_simple_fork`) that relied on a quirky, accidental behaviour of the old `BeaconChainHarness`: attestations were not produced for epochs earlier than the current one, which masked potential bugs in those test cases.
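
For orientation, here is a rough sketch of how a test might drive the reworked harness slot by slot. The individual calls (`get_all_validators`, `get_current_state`, `get_current_slot`, `advance_slot`, `make_block`, `process_block`, `attest_block`) are taken from the diffs below; the wrapper function `add_blocks_at_slots` itself is hypothetical and the exact signatures in the merged code may differ:

```rust
use beacon_chain::test_utils::{BeaconChainHarness, NullMigratorEphemeralHarnessType};
use types::{MainnetEthSpec, Slot};

type E = MainnetEthSpec;

/// Sketch only: produce one block per requested slot, attested by all validators.
/// `slots` is assumed to be sorted ascending; per the description above, jumping
/// backwards to an earlier epoch is a hard error in the real framework.
fn add_blocks_at_slots(
    harness: &mut BeaconChainHarness<NullMigratorEphemeralHarnessType<E>>,
    slots: &[Slot],
) {
    let validators = harness.get_all_validators();
    let mut state = harness.get_current_state();

    for &slot in slots {
        // Slots that are absent from `slots` are simply skipped over.
        while harness.get_current_slot() < slot {
            harness.advance_slot();
        }
        let (block, post_state) = harness.make_block(state, slot);
        let block_root = harness.process_block(slot, block.clone());
        harness.attest_block(&post_state, block_root, &block, &validators);
        state = post_state;
    }
}
```

Because block production is driven explicitly, a fork with two blocks at the same slot can be built by calling `make_block` from two different parent states; the diff also adds a test-only `set_graffiti` helper, presumably so the two competing blocks differ in content.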
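The logger change itself isn't visible in the excerpts below (only `extern crate slog_term` is), so the following is a hedged illustration rather than the PR's code: a synchronous `slog-term` drain writing straight to stdout is one way to make test log output survive panics and show up under `--nocapture`. The `test_logger` helper name is hypothetical.

```rust
use slog::{o, Drain, Logger};

/// Hypothetical helper: a synchronous terminal logger. Because nothing is
/// buffered in an async drain, a failing test still shows the log lines emitted
/// before the panic, and `cargo test -- --nocapture` prints them to the console.
fn test_logger() -> Logger {
    let decorator = slog_term::PlainSyncDecorator::new(std::io::stdout());
    let drain = slog_term::FullFormat::new(decorator).build().fuse();
    Logger::root(drain, o!())
}
```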

Co-authored-by: Michael Sproul <michael@sigmaprime.io>
Adam Szkoda 2020-08-26 09:24:55 +00:00
parent 30bb7aecfb
commit d9f4819fe0
17 changed files with 1220 additions and 824 deletions

Cargo.lock (generated)

@ -346,11 +346,13 @@ dependencies = [
"lighthouse_metrics",
"log 0.4.11",
"lru",
"maplit",
"merkle_proof",
"operation_pool",
"parking_lot 0.11.0",
"proto_array",
"rand 0.7.3",
"rand_core 0.5.1",
"rayon",
"safe_arith",
"serde",


@ -11,6 +11,7 @@ participation_metrics = [] # Exposes validator participation metrics to Prometh
[dev-dependencies]
int_to_bytes = { path = "../../consensus/int_to_bytes" }
maplit = "1.0.2"
[dependencies]
eth2_config = { path = "../../common/eth2_config" }
@ -45,6 +46,7 @@ futures = "0.3.5"
genesis = { path = "../genesis" }
integer-sqrt = "0.1.3"
rand = "0.7.3"
rand_core = "0.5.1"
proto_array = { path = "../../consensus/proto_array" }
lru = "0.5.1"
tempfile = "3.1.0"


@ -1978,7 +1978,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
self.head_tracker.clone(),
old_finalized_checkpoint,
new_finalized_checkpoint,
);
)?;
let _ = self.event_handler.register(EventKind::BeaconFinalization {
epoch: new_finalized_checkpoint.epoch,
@ -2070,10 +2070,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.beacon_block_root;
let mut visited: HashSet<Hash256> = HashSet::new();
let mut finalized_blocks: HashSet<Hash256> = HashSet::new();
let mut justified_blocks: HashSet<Hash256> = HashSet::new();
let genesis_block_hash = Hash256::zero();
writeln!(output, "digraph beacon {{").unwrap();
writeln!(output, "\t_{:?}[label=\"genesis\"];", genesis_block_hash).unwrap();
writeln!(output, "\t_{:?}[label=\"zero\"];", genesis_block_hash).unwrap();
// Canonical head needs to be processed first as otherwise finalized blocks aren't detected
// properly.
@ -2104,6 +2105,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.unwrap()
.unwrap();
finalized_blocks.insert(state.finalized_checkpoint.root);
justified_blocks.insert(state.current_justified_checkpoint.root);
justified_blocks.insert(state.previous_justified_checkpoint.root);
}
if block_hash == canonical_head_hash {
@ -2124,6 +2127,15 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
signed_beacon_block.slot()
)
.unwrap();
} else if justified_blocks.contains(&block_hash) {
writeln!(
output,
"\t_{:?}[label=\"{} ({})\" shape=cds];",
block_hash,
block_hash,
signed_beacon_block.slot()
)
.unwrap();
} else {
writeln!(
output,
@ -2153,6 +2165,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let mut file = std::fs::File::create(file_name).unwrap();
self.dump_as_dot(&mut file);
}
// Should be used in tests only
pub fn set_graffiti(&mut self, graffiti: Graffiti) {
self.graffiti = graffiti;
}
}
impl<T: BeaconChainTypes> Drop for BeaconChain<T> {


@ -2,6 +2,10 @@
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate slog;
extern crate slog_term;
pub mod attestation_verification;
mod beacon_chain;
mod beacon_fork_choice_store;


@ -1,13 +1,13 @@
use crate::errors::BeaconChainError;
use crate::head_tracker::HeadTracker;
use parking_lot::Mutex;
use slog::{debug, error, warn, Logger};
use slog::{debug, warn, Logger};
use std::collections::{HashMap, HashSet};
use std::mem;
use std::sync::mpsc;
use std::sync::Arc;
use std::thread;
use store::hot_cold_store::{process_finalization, HotColdDBError};
use store::hot_cold_store::{migrate_database, HotColdDBError};
use store::iter::RootsIterator;
use store::{Error, ItemStore, StoreOp};
pub use store::{HotColdDB, MemoryStore};
@ -43,7 +43,8 @@ pub trait Migrate<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>:
_head_tracker: Arc<HeadTracker>,
_old_finalized_checkpoint: Checkpoint,
_new_finalized_checkpoint: Checkpoint,
) {
) -> Result<(), BeaconChainError> {
Ok(())
}
/// Traverses live heads and prunes blocks and states of chains that we know can't be built
@ -237,6 +238,7 @@ pub trait Migrate<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>:
.map(|(slot, state_hash)| StoreOp::DeleteState(state_hash, slot)),
)
.collect();
store.do_atomically(batch)?;
for head_hash in abandoned_heads.into_iter() {
head_tracker.remove_head(head_hash);
@ -252,6 +254,17 @@ pub trait Migrate<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>:
pub struct NullMigrator;
impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> Migrate<E, Hot, Cold> for NullMigrator {
fn process_finalization(
&self,
_finalized_state_root: BeaconStateHash,
_new_finalized_state: BeaconState<E>,
_head_tracker: Arc<HeadTracker>,
_old_finalized_checkpoint: Checkpoint,
_new_finalized_checkpoint: Checkpoint,
) -> Result<(), BeaconChainError> {
Ok(())
}
fn new(_: Arc<HotColdDB<E, Hot, Cold>>, _: Logger) -> Self {
NullMigrator
}
@ -279,8 +292,8 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> Migrate<E, Hot, Cold>
head_tracker: Arc<HeadTracker>,
old_finalized_checkpoint: Checkpoint,
new_finalized_checkpoint: Checkpoint,
) {
if let Err(e) = Self::prune_abandoned_forks(
) -> Result<(), BeaconChainError> {
Self::prune_abandoned_forks(
self.db.clone(),
head_tracker,
finalized_state_root,
@ -288,16 +301,23 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> Migrate<E, Hot, Cold>
old_finalized_checkpoint,
new_finalized_checkpoint,
&self.log,
) {
error!(&self.log, "Pruning error"; "error" => format!("{:?}", e));
}
)?;
if let Err(e) = process_finalization(
match migrate_database(
self.db.clone(),
finalized_state_root.into(),
&new_finalized_state,
) {
error!(&self.log, "Migration error"; "error" => format!("{:?}", e));
Ok(()) => Ok(()),
Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => {
debug!(
self.log,
"Database migration postponed, unaligned finalized block";
"slot" => slot.as_u64()
);
Ok(())
}
Err(e) => Err(e.into()),
}
}
}
@ -332,7 +352,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> Migrate<E, Hot, Cold>
head_tracker: Arc<HeadTracker>,
old_finalized_checkpoint: Checkpoint,
new_finalized_checkpoint: Checkpoint,
) {
) -> Result<(), BeaconChainError> {
let (ref mut tx, ref mut thread) = *self.tx_thread.lock();
if let Err(tx_err) = tx.send((
@ -360,6 +380,8 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> Migrate<E, Hot, Cold>
// Retry at most once, we could recurse but that would risk overflowing the stack.
let _ = tx.send(tx_err.0);
}
Ok(())
}
}
@ -394,7 +416,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
Err(e) => warn!(log, "Block pruning failed: {:?}", e),
}
match process_finalization(db.clone(), state_root.into(), &state) {
match migrate_database(db.clone(), state_root.into(), &state) {
Ok(()) => {}
Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => {
debug!(

File diff suppressed because it is too large.


@ -26,7 +26,7 @@ lazy_static! {
fn produces_attestations() {
let num_blocks_produced = MainnetEthSpec::slots_per_epoch() * 4;
let harness = BeaconChainHarness::new(
let mut harness = BeaconChainHarness::new_with_store_config(
MainnetEthSpec,
KEYPAIRS[..].to_vec(),
StoreConfig::default(),


@ -5,7 +5,9 @@ extern crate lazy_static;
use beacon_chain::{
attestation_verification::Error as AttnError,
test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, HarnessType},
test_utils::{
AttestationStrategy, BeaconChainHarness, BlockStrategy, NullMigratorEphemeralHarnessType,
},
BeaconChain, BeaconChainTypes,
};
use int_to_bytes::int_to_bytes32;
@ -30,7 +32,7 @@ lazy_static! {
}
/// Returns a beacon chain harness.
fn get_harness(validator_count: usize) -> BeaconChainHarness<HarnessType<E>> {
fn get_harness(validator_count: usize) -> BeaconChainHarness<NullMigratorEphemeralHarnessType<E>> {
let harness = BeaconChainHarness::new_with_target_aggregators(
MainnetEthSpec,
KEYPAIRS[0..validator_count].to_vec(),
@ -184,8 +186,7 @@ fn get_non_aggregator<T: BeaconChainTypes>(
/// Tests verification of `SignedAggregateAndProof` from the gossip network.
#[test]
fn aggregated_gossip_verification() {
let harness = get_harness(VALIDATOR_COUNT);
let chain = &harness.chain;
let mut harness = get_harness(VALIDATOR_COUNT);
// Extend the chain out a few epochs so we have some chain depth to play with.
harness.extend_chain(
@ -197,7 +198,7 @@ fn aggregated_gossip_verification() {
// Advance into a slot where there have not been blocks or attestations produced.
harness.advance_slot();
let current_slot = chain.slot().expect("should get slot");
let current_slot = harness.chain.slot().expect("should get slot");
assert_eq!(
current_slot % E::slots_per_epoch(),
@ -532,8 +533,7 @@ fn aggregated_gossip_verification() {
/// Tests the verification conditions for an unaggregated attestation on the gossip network.
#[test]
fn unaggregated_gossip_verification() {
let harness = get_harness(VALIDATOR_COUNT);
let chain = &harness.chain;
let mut harness = get_harness(VALIDATOR_COUNT);
// Extend the chain out a few epochs so we have some chain depth to play with.
harness.extend_chain(
@ -545,8 +545,8 @@ fn unaggregated_gossip_verification() {
// Advance into a slot where there have not been blocks or attestations produced.
harness.advance_slot();
let current_slot = chain.slot().expect("should get slot");
let current_epoch = chain.epoch().expect("should get epoch");
let current_slot = harness.chain.slot().expect("should get slot");
let current_epoch = harness.chain.epoch().expect("should get epoch");
assert_eq!(
current_slot % E::slots_per_epoch(),
@ -772,8 +772,7 @@ fn unaggregated_gossip_verification() {
/// This also checks that we can do a state lookup if we don't get a hit from the shuffling cache.
#[test]
fn attestation_that_skips_epochs() {
let harness = get_harness(VALIDATOR_COUNT);
let chain = &harness.chain;
let mut harness = get_harness(VALIDATOR_COUNT);
// Extend the chain out a few epochs so we have some chain depth to play with.
harness.extend_chain(
@ -782,16 +781,18 @@ fn attestation_that_skips_epochs() {
AttestationStrategy::SomeValidators(vec![]),
);
let current_slot = chain.slot().expect("should get slot");
let current_epoch = chain.epoch().expect("should get epoch");
let current_slot = harness.chain.slot().expect("should get slot");
let current_epoch = harness.chain.epoch().expect("should get epoch");
let earlier_slot = (current_epoch - 2).start_slot(MainnetEthSpec::slots_per_epoch());
let earlier_block = chain
let earlier_block = harness
.chain
.block_at_slot(earlier_slot)
.expect("should not error getting block at slot")
.expect("should find block at slot");
let mut state = chain
let mut state = harness
.chain
.get_state(&earlier_block.state_root(), Some(earlier_slot))
.expect("should not error getting state")
.expect("should find state");


@ -4,7 +4,9 @@
extern crate lazy_static;
use beacon_chain::{
test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, HarnessType},
test_utils::{
AttestationStrategy, BeaconChainHarness, BlockStrategy, NullMigratorEphemeralHarnessType,
},
BeaconSnapshot, BlockError,
};
use store::config::StoreConfig;
@ -31,7 +33,7 @@ lazy_static! {
}
fn get_chain_segment() -> Vec<BeaconSnapshot<E>> {
let harness = get_harness(VALIDATOR_COUNT);
let mut harness = get_harness(VALIDATOR_COUNT);
harness.extend_chain(
CHAIN_SEGMENT_LENGTH,
@ -48,8 +50,8 @@ fn get_chain_segment() -> Vec<BeaconSnapshot<E>> {
.collect()
}
fn get_harness(validator_count: usize) -> BeaconChainHarness<HarnessType<E>> {
let harness = BeaconChainHarness::new(
fn get_harness(validator_count: usize) -> BeaconChainHarness<NullMigratorEphemeralHarnessType<E>> {
let harness = BeaconChainHarness::new_with_store_config(
MainnetEthSpec,
KEYPAIRS[0..validator_count].to_vec(),
StoreConfig::default(),
@ -81,7 +83,7 @@ fn junk_aggregate_signature() -> AggregateSignature {
fn update_proposal_signatures(
snapshots: &mut [BeaconSnapshot<E>],
harness: &BeaconChainHarness<HarnessType<E>>,
harness: &BeaconChainHarness<NullMigratorEphemeralHarnessType<E>>,
) {
for snapshot in snapshots {
let spec = &harness.chain.spec;
@ -91,7 +93,7 @@ fn update_proposal_signatures(
.get_beacon_proposer_index(slot, spec)
.expect("should find proposer index");
let keypair = harness
.keypairs
.validators_keypairs
.get(proposer_index)
.expect("proposer keypair should be available");
@ -274,7 +276,7 @@ fn chain_segment_non_linear_slots() {
}
fn assert_invalid_signature(
harness: &BeaconChainHarness<HarnessType<E>>,
harness: &BeaconChainHarness<NullMigratorEphemeralHarnessType<E>>,
block_index: usize,
snapshots: &[BeaconSnapshot<E>],
item: &str,
@ -325,7 +327,7 @@ fn assert_invalid_signature(
// slot) tuple.
}
fn get_invalid_sigs_harness() -> BeaconChainHarness<HarnessType<E>> {
fn get_invalid_sigs_harness() -> BeaconChainHarness<NullMigratorEphemeralHarnessType<E>> {
let harness = get_harness(VALIDATOR_COUNT);
harness
.chain


@ -7,7 +7,7 @@ extern crate lazy_static;
use beacon_chain::observed_operations::ObservationOutcome;
use beacon_chain::test_utils::{
AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType,
AttestationStrategy, BeaconChainHarness, BlockStrategy, BlockingMigratorDiskHarnessType,
};
use sloggers::{null::NullLoggerBuilder, Build};
use std::sync::Arc;
@ -28,7 +28,7 @@ lazy_static! {
}
type E = MinimalEthSpec;
type TestHarness = BeaconChainHarness<DiskHarnessType<E>>;
type TestHarness = BeaconChainHarness<BlockingMigratorDiskHarnessType<E>>;
type HotColdDB = store::HotColdDB<E, LevelDB<E>, LevelDB<E>>;
fn get_store(db_path: &TempDir) -> Arc<HotColdDB> {
@ -57,8 +57,8 @@ fn get_harness(store: Arc<HotColdDB>, validator_count: usize) -> TestHarness {
fn voluntary_exit() {
let db_path = tempdir().unwrap();
let store = get_store(&db_path);
let harness = get_harness(store.clone(), VALIDATOR_COUNT);
let spec = &harness.chain.spec;
let mut harness = get_harness(store.clone(), VALIDATOR_COUNT);
let spec = &harness.chain.spec.clone();
harness.extend_chain(
(E::slots_per_epoch() * (spec.shard_committee_period + 1)) as usize,


@ -44,7 +44,7 @@ fn finalizes_after_resuming_from_db() {
let db_path = tempdir().unwrap();
let store = get_store(&db_path);
let harness = BeaconChainHarness::new_with_disk_store(
let mut harness = BeaconChainHarness::new_with_disk_store(
MinimalEthSpec,
store.clone(),
KEYPAIRS[0..validator_count].to_vec(),
@ -88,7 +88,7 @@ fn finalizes_after_resuming_from_db() {
let data_dir = harness.data_dir;
let original_chain = harness.chain;
let resumed_harness = BeaconChainHarness::resume_from_disk_store(
let mut resumed_harness = BeaconChainHarness::resume_from_disk_store(
MinimalEthSpec,
store,
KEYPAIRS[0..validator_count].to_vec(),

File diff suppressed because it is too large.


@ -6,7 +6,8 @@ extern crate lazy_static;
use beacon_chain::{
attestation_verification::Error as AttnError,
test_utils::{
AttestationStrategy, BeaconChainHarness, BlockStrategy, HarnessType, OP_POOL_DB_KEY,
AttestationStrategy, BeaconChainHarness, BlockStrategy, NullMigratorEphemeralHarnessType,
OP_POOL_DB_KEY,
},
};
use operation_pool::PersistedOperationPool;
@ -24,8 +25,10 @@ lazy_static! {
static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
}
fn get_harness(validator_count: usize) -> BeaconChainHarness<HarnessType<MinimalEthSpec>> {
let harness = BeaconChainHarness::new(
fn get_harness(
validator_count: usize,
) -> BeaconChainHarness<NullMigratorEphemeralHarnessType<MinimalEthSpec>> {
let harness = BeaconChainHarness::new_with_store_config(
MinimalEthSpec,
KEYPAIRS[0..validator_count].to_vec(),
StoreConfig::default(),
@ -64,7 +67,7 @@ fn massive_skips() {
fn iterators() {
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 2 - 1;
let harness = get_harness(VALIDATOR_COUNT);
let mut harness = get_harness(VALIDATOR_COUNT);
harness.extend_chain(
num_blocks_produced as usize,
@ -139,7 +142,7 @@ fn iterators() {
#[test]
fn chooses_fork() {
let harness = get_harness(VALIDATOR_COUNT);
let mut harness = get_harness(VALIDATOR_COUNT);
let two_thirds = (VALIDATOR_COUNT / 3) * 2;
let delay = MinimalEthSpec::default_spec().min_attestation_inclusion_delay as usize;
@ -190,7 +193,7 @@ fn chooses_fork() {
fn finalizes_with_full_participation() {
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5;
let harness = get_harness(VALIDATOR_COUNT);
let mut harness = get_harness(VALIDATOR_COUNT);
harness.extend_chain(
num_blocks_produced as usize,
@ -225,7 +228,7 @@ fn finalizes_with_full_participation() {
fn finalizes_with_two_thirds_participation() {
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5;
let harness = get_harness(VALIDATOR_COUNT);
let mut harness = get_harness(VALIDATOR_COUNT);
let two_thirds = (VALIDATOR_COUNT / 3) * 2;
let attesters = (0..two_thirds).collect();
@ -268,7 +271,7 @@ fn finalizes_with_two_thirds_participation() {
fn does_not_finalize_with_less_than_two_thirds_participation() {
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5;
let harness = get_harness(VALIDATOR_COUNT);
let mut harness = get_harness(VALIDATOR_COUNT);
let two_thirds = (VALIDATOR_COUNT / 3) * 2;
let less_than_two_thirds = two_thirds - 1;
@ -305,7 +308,7 @@ fn does_not_finalize_with_less_than_two_thirds_participation() {
fn does_not_finalize_without_attestation() {
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5;
let harness = get_harness(VALIDATOR_COUNT);
let mut harness = get_harness(VALIDATOR_COUNT);
harness.extend_chain(
num_blocks_produced as usize,
@ -338,7 +341,7 @@ fn does_not_finalize_without_attestation() {
fn roundtrip_operation_pool() {
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5;
let harness = get_harness(VALIDATOR_COUNT);
let mut harness = get_harness(VALIDATOR_COUNT);
// Add some attestations
harness.extend_chain(
@ -370,7 +373,7 @@ fn roundtrip_operation_pool() {
fn unaggregated_attestations_added_to_fork_choice_some_none() {
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() / 2;
let harness = get_harness(VALIDATOR_COUNT);
let mut harness = get_harness(VALIDATOR_COUNT);
harness.extend_chain(
num_blocks_produced as usize,
@ -424,7 +427,7 @@ fn unaggregated_attestations_added_to_fork_choice_some_none() {
fn attestations_with_increasing_slots() {
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5;
let harness = get_harness(VALIDATOR_COUNT);
let mut harness = get_harness(VALIDATOR_COUNT);
let mut attestations = vec![];
@ -486,7 +489,7 @@ fn attestations_with_increasing_slots() {
fn unaggregated_attestations_added_to_fork_choice_all_updated() {
let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 2 - 1;
let harness = get_harness(VALIDATOR_COUNT);
let mut harness = get_harness(VALIDATOR_COUNT);
harness.extend_chain(
num_blocks_produced as usize,
@ -541,7 +544,7 @@ fn unaggregated_attestations_added_to_fork_choice_all_updated() {
fn run_skip_slot_test(skip_slots: u64) {
let num_validators = 8;
let harness_a = get_harness(num_validators);
let mut harness_a = get_harness(num_validators);
let harness_b = get_harness(num_validators);
for _ in 0..skip_slots {


@ -23,7 +23,7 @@ mod tests {
let log = get_logger();
let beacon_chain = Arc::new(
BeaconChainHarness::new(
BeaconChainHarness::new_with_store_config(
MinimalEthSpec,
generate_deterministic_keypairs(8),
StoreConfig::default(),


@ -825,7 +825,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
}
/// Advance the split point of the store, moving new finalized states to the freezer.
pub fn process_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
store: Arc<HotColdDB<E, Hot, Cold>>,
frozen_head_root: Hash256,
frozen_head: &BeaconState<E>,


@ -27,7 +27,7 @@ pub mod iter;
use std::borrow::Cow;
pub use self::config::StoreConfig;
pub use self::hot_cold_store::{HotColdDB, HotStateSummary, Split};
pub use self::hot_cold_store::{BlockReplay, HotColdDB, HotStateSummary, Split};
pub use self::leveldb_store::LevelDB;
pub use self::memory_store::MemoryStore;
pub use self::partial_beacon_state::PartialBeaconState;


@ -1,8 +1,10 @@
#![cfg(not(debug_assertions))]
use beacon_chain::{
test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, HarnessType},
BeaconChain, BeaconChainError, BeaconForkChoiceStore, ForkChoiceError,
test_utils::{
AttestationStrategy, BeaconChainHarness, BlockStrategy, NullMigratorEphemeralHarnessType,
},
BeaconChain, BeaconChainError, BeaconForkChoiceStore, ForkChoiceError, StateSkipConfig,
};
use fork_choice::{
ForkChoiceStore, InvalidAttestation, InvalidBlock, QueuedAttestation,
@ -18,7 +20,7 @@ use types::{BeaconBlock, BeaconState, Hash256, SignedBeaconBlock};
pub type E = MainnetEthSpec;
pub const VALIDATOR_COUNT: usize = 16;
pub const VALIDATOR_COUNT: usize = 32;
/// Defines some delay between when an attestation is created and when it is mutated.
pub enum MutationDelay {
@ -30,7 +32,7 @@ pub enum MutationDelay {
/// A helper struct to make testing fork choice more ergonomic and less repetitive.
struct ForkChoiceTest {
harness: BeaconChainHarness<HarnessType<E>>,
harness: BeaconChainHarness<NullMigratorEphemeralHarnessType<E>>,
}
impl ForkChoiceTest {
@ -115,22 +117,31 @@ impl ForkChoiceTest {
}
/// Build the chain whilst `predicate` returns `true`.
pub fn apply_blocks_while<F>(self, mut predicate: F) -> Self
pub fn apply_blocks_while<F>(mut self, mut predicate: F) -> Self
where
F: FnMut(&BeaconBlock<E>, &BeaconState<E>) -> bool,
{
self.harness.advance_slot();
self.harness.extend_chain_while(
|block, state| predicate(&block.message, state),
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
);
let mut state = self.harness.get_current_state();
let validators = self.harness.get_all_validators();
loop {
let slot = self.harness.get_current_slot();
let (block, state_) = self.harness.make_block(state, slot);
state = state_;
if !predicate(&block.message, &state) {
break;
}
let block_hash = self.harness.process_block(slot, block.clone());
self.harness
.attest_block(&state, block_hash, &block, &validators);
self.harness.advance_slot();
}
self
}
/// Apply `count` blocks to the chain (with attestations).
pub fn apply_blocks(self, count: usize) -> Self {
pub fn apply_blocks(mut self, count: usize) -> Self {
self.harness.advance_slot();
self.harness.extend_chain(
count,
@ -142,7 +153,7 @@ impl ForkChoiceTest {
}
/// Apply `count` blocks to the chain (without attestations).
pub fn apply_blocks_without_new_attestations(self, count: usize) -> Self {
pub fn apply_blocks_without_new_attestations(mut self, count: usize) -> Self {
self.harness.advance_slot();
self.harness.extend_chain(
count,
@ -181,13 +192,22 @@ impl ForkChoiceTest {
/// Applies a block directly to fork choice, bypassing the beacon chain.
///
/// Asserts the block was applied successfully.
pub fn apply_block_directly_to_fork_choice<F>(self, mut func: F) -> Self
pub fn apply_block_directly_to_fork_choice<F>(mut self, mut func: F) -> Self
where
F: FnMut(&mut BeaconBlock<E>, &mut BeaconState<E>),
{
let (mut block, mut state) = self.harness.get_block();
let state = self
.harness
.chain
.state_at_slot(
self.harness.get_current_slot() - 1,
StateSkipConfig::WithStateRoots,
)
.unwrap();
let slot = self.harness.get_current_slot();
let (mut block, mut state) = self.harness.make_block(state, slot);
func(&mut block.message, &mut state);
let current_slot = self.harness.chain.slot().unwrap();
let current_slot = self.harness.get_current_slot();
self.harness
.chain
.fork_choice
@ -201,7 +221,7 @@ impl ForkChoiceTest {
///
/// Asserts that an error occurred and allows inspecting it via `comparison_func`.
pub fn apply_invalid_block_directly_to_fork_choice<F, G>(
self,
mut self,
mut mutation_func: F,
mut comparison_func: G,
) -> Self
@ -209,9 +229,18 @@ impl ForkChoiceTest {
F: FnMut(&mut BeaconBlock<E>, &mut BeaconState<E>),
G: FnMut(ForkChoiceError),
{
let (mut block, mut state) = self.harness.get_block();
let state = self
.harness
.chain
.state_at_slot(
self.harness.get_current_slot() - 1,
StateSkipConfig::WithStateRoots,
)
.unwrap();
let slot = self.harness.get_current_slot();
let (mut block, mut state) = self.harness.make_block(state, slot);
mutation_func(&mut block.message, &mut state);
let current_slot = self.harness.chain.slot().unwrap();
let current_slot = self.harness.get_current_slot();
let err = self
.harness
.chain
@ -267,20 +296,21 @@ impl ForkChoiceTest {
///
/// Also returns some info about who created it.
fn apply_attestation_to_chain<F, G>(
self,
mut self,
delay: MutationDelay,
mut mutation_func: F,
mut comparison_func: G,
) -> Self
where
F: FnMut(&mut IndexedAttestation<E>, &BeaconChain<HarnessType<E>>),
F: FnMut(&mut IndexedAttestation<E>, &BeaconChain<NullMigratorEphemeralHarnessType<E>>),
G: FnMut(Result<(), BeaconChainError>),
{
let chain = &self.harness.chain;
let head = chain.head().expect("should get head");
let current_slot = chain.slot().expect("should get slot");
let head = self.harness.chain.head().expect("should get head");
let current_slot = self.harness.chain.slot().expect("should get slot");
let mut attestation = chain
let mut attestation = self
.harness
.chain
.produce_unaggregated_attestation(current_slot, 0)
.expect("should not error while producing attestation");
@ -298,9 +328,13 @@ impl ForkChoiceTest {
.get_committee_count_at_slot(current_slot)
.expect("should not error while getting committee count");
let subnet_id =
SubnetId::compute_subnet::<E>(current_slot, 0, committee_count, &chain.spec)
.expect("should compute subnet id");
let subnet_id = SubnetId::compute_subnet::<E>(
current_slot,
0,
committee_count,
&self.harness.chain.spec,
)
.expect("should compute subnet id");
let validator_sk = generate_deterministic_keypair(validator_index).sk;
@ -309,12 +343,14 @@ impl ForkChoiceTest {
&validator_sk,
validator_committee_index,
&head.beacon_state.fork,
chain.genesis_validators_root,
&chain.spec,
self.harness.chain.genesis_validators_root,
&self.harness.chain.spec,
)
.expect("should sign attestation");
let mut verified_attestation = chain
let mut verified_attestation = self
.harness
.chain
.verify_unaggregated_attestation_for_gossip(attestation, subnet_id)
.expect("precondition: should gossip verify attestation");
@ -327,9 +363,15 @@ impl ForkChoiceTest {
);
}
mutation_func(verified_attestation.__indexed_attestation_mut(), chain);
mutation_func(
verified_attestation.__indexed_attestation_mut(),
&self.harness.chain,
);
let result = chain.apply_attestation_to_fork_choice(&verified_attestation);
let result = self
.harness
.chain
.apply_attestation_to_fork_choice(&verified_attestation);
comparison_func(result);