From 41c3294c16096d28000b2058f7db0076a8e57956 Mon Sep 17 00:00:00 2001
From: Age Manning
Date: Tue, 17 Mar 2020 23:05:55 +1100
Subject: [PATCH] Test corrections (#925)

* Merge #913

* Correct release tests

* Completed release test corrections
---
 .../beacon_chain/tests/attestation_tests.rs   | 32 +++++++++----
 beacon_node/beacon_chain/tests/store_tests.rs |  2 +-
 beacon_node/beacon_chain/tests/tests.rs       |  2 +-
 beacon_node/network/src/persisted_dht.rs      | 12 ++---
 beacon_node/network/src/service.rs            |  4 +-
 beacon_node/network/src/service/tests.rs      |  9 ++--
 eth2/operation_pool/src/lib.rs                | 48 ++++++++++++++-----
 7 files changed, 71 insertions(+), 38 deletions(-)

diff --git a/beacon_node/beacon_chain/tests/attestation_tests.rs b/beacon_node/beacon_chain/tests/attestation_tests.rs
index 52ccd67c4..0420f2b6c 100644
--- a/beacon_node/beacon_chain/tests/attestation_tests.rs
+++ b/beacon_node/beacon_chain/tests/attestation_tests.rs
@@ -56,7 +56,7 @@ fn attestation_validity() {
         .expect("should get at least one attestation");
 
     assert_eq!(
-        chain.process_attestation(valid_attestation.clone()),
+        chain.process_attestation(valid_attestation.clone(), Some(false)),
         Ok(AttestationProcessingOutcome::Processed),
         "should accept valid attestation"
     );
@@ -71,7 +71,7 @@ fn attestation_validity() {
     assert_eq!(
         harness
             .chain
-            .process_attestation(epoch_mismatch_attestation),
+            .process_attestation(epoch_mismatch_attestation, Some(false)),
         Ok(AttestationProcessingOutcome::BadTargetEpoch),
         "should not accept attestation where the slot is not in the same epoch as the target"
     );
@@ -85,7 +85,9 @@ fn attestation_validity() {
     early_attestation.data.slot = (current_epoch + 1).start_slot(MainnetEthSpec::slots_per_epoch());
 
     assert_eq!(
-        harness.chain.process_attestation(early_attestation),
+        harness
+            .chain
+            .process_attestation(early_attestation, Some(false)),
         Ok(AttestationProcessingOutcome::FutureEpoch {
             attestation_epoch: current_epoch + 1,
             current_epoch
@@ -118,7 +120,9 @@ fn attestation_validity() {
         .expect("should get at least one late attestation");
 
     assert_eq!(
-        harness.chain.process_attestation(late_attestation),
+        harness
+            .chain
+            .process_attestation(late_attestation, Some(false)),
         Ok(AttestationProcessingOutcome::PastEpoch {
             attestation_epoch: current_epoch - 2,
             current_epoch
@@ -134,7 +138,9 @@ fn attestation_validity() {
     bad_target_attestation.data.target.root = Hash256::from_low_u64_be(42);
 
     assert_eq!(
-        harness.chain.process_attestation(bad_target_attestation),
+        harness
+            .chain
+            .process_attestation(bad_target_attestation, Some(false)),
         Ok(AttestationProcessingOutcome::UnknownTargetRoot(
             Hash256::from_low_u64_be(42)
         )),
@@ -149,7 +155,9 @@ fn attestation_validity() {
     future_block_attestation.data.slot -= 1;
 
     assert_eq!(
-        harness.chain.process_attestation(future_block_attestation),
+        harness
+            .chain
+            .process_attestation(future_block_attestation, Some(false)),
         Ok(AttestationProcessingOutcome::AttestsToFutureBlock {
             block: current_slot,
             attestation: current_slot - 1
@@ -165,7 +173,9 @@ fn attestation_validity() {
     bad_head_attestation.data.beacon_block_root = Hash256::from_low_u64_be(42);
 
     assert_eq!(
-        harness.chain.process_attestation(bad_head_attestation),
+        harness
+            .chain
+            .process_attestation(bad_head_attestation, Some(false)),
         Ok(AttestationProcessingOutcome::UnknownHeadBlock {
             beacon_block_root: Hash256::from_low_u64_be(42)
         }),
@@ -183,7 +193,9 @@ fn attestation_validity() {
     bad_signature_attestation.signature = agg_sig;
 
     assert_eq!(
-        harness.chain.process_attestation(bad_signature_attestation),
+        harness
+            .chain
+            .process_attestation(bad_signature_attestation, Some(false)),
         Ok(AttestationProcessingOutcome::InvalidSignature),
         "should not accept bad_signature attestation"
     );
@@ -199,7 +211,7 @@ fn attestation_validity() {
     assert_eq!(
         harness
             .chain
-            .process_attestation(empty_bitfield_attestation),
+            .process_attestation(empty_bitfield_attestation, Some(false)),
         Ok(AttestationProcessingOutcome::EmptyAggregationBitfield),
         "should not accept empty_bitfield attestation"
     );
@@ -247,7 +259,7 @@ fn attestation_that_skips_epochs() {
         .expect("should get at least one attestation");
 
     assert_eq!(
-        harness.chain.process_attestation(attestation),
+        harness.chain.process_attestation(attestation, Some(false)),
         Ok(AttestationProcessingOutcome::Processed),
         "should process attestation that skips slots"
     );
diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs
index ea36a0125..6af117a92 100644
--- a/beacon_node/beacon_chain/tests/store_tests.rs
+++ b/beacon_node/beacon_chain/tests/store_tests.rs
@@ -306,7 +306,7 @@ fn epoch_boundary_state_attestation_processing() {
             .epoch;
         let res = harness
             .chain
-            .process_attestation_internal(attestation.clone());
+            .process_attestation_internal(attestation.clone(), Some(true));
 
         let current_epoch = harness.chain.epoch().expect("should get epoch");
         let attestation_epoch = attestation.data.target.epoch;
diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs
index 999b25464..a238a222d 100644
--- a/beacon_node/beacon_chain/tests/tests.rs
+++ b/beacon_node/beacon_chain/tests/tests.rs
@@ -449,7 +449,7 @@ fn attestations_with_increasing_slots() {
 
     for attestation in attestations {
         let attestation_epoch = attestation.data.target.epoch;
-        let res = harness.chain.process_attestation(attestation);
+        let res = harness.chain.process_attestation(attestation, Some(false));
 
         if attestation_epoch + 1 < current_epoch {
             assert_eq!(
diff --git a/beacon_node/network/src/persisted_dht.rs b/beacon_node/network/src/persisted_dht.rs
index c43e5d84f..bf9d70430 100644
--- a/beacon_node/network/src/persisted_dht.rs
+++ b/beacon_node/network/src/persisted_dht.rs
@@ -1,15 +1,13 @@
-use beacon_chain::BeaconChainTypes;
 use eth2_libp2p::Enr;
 use rlp;
 use std::sync::Arc;
-use store::Store;
-use store::{DBColumn, Error as StoreError, SimpleStoreItem};
-use types::Hash256;
+use store::{DBColumn, Error as StoreError, SimpleStoreItem, Store};
+use types::{EthSpec, Hash256};
 
 /// 32-byte key for accessing the `DhtEnrs`.
 pub const DHT_DB_KEY: &str = "PERSISTEDDHTPERSISTEDDHTPERSISTE";
 
-pub fn load_dht<T: BeaconChainTypes>(store: Arc<T::Store>) -> Vec<Enr> {
+pub fn load_dht<T: Store<E>, E: EthSpec>(store: Arc<T>) -> Vec<Enr> {
     // Load DHT from store
     let key = Hash256::from_slice(&DHT_DB_KEY.as_bytes());
     match store.get(&key) {
@@ -22,8 +20,8 @@ pub fn load_dht<T: BeaconChainTypes>(store: Arc<T::Store>) -> Vec<Enr> {
 }
 
 /// Attempt to persist the ENR's in the DHT to `self.store`.
-pub fn persist_dht<T: BeaconChainTypes>(
-    store: Arc<T::Store>,
+pub fn persist_dht<T: Store<E>, E: EthSpec>(
+    store: Arc<T>,
     enrs: Vec<Enr>,
 ) -> Result<(), store::Error> {
     let key = Hash256::from_slice(&DHT_DB_KEY.as_bytes());
diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs
index 73dfd1c0a..be1035f6b 100644
--- a/beacon_node/network/src/service.rs
+++ b/beacon_node/network/src/service.rs
@@ -75,7 +75,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
 
         // launch libp2p service
         let (network_globals, mut libp2p) = LibP2PService::new(config, network_log.clone())?;
-        for enr in load_dht::<T>(store.clone()) {
+        for enr in load_dht::<T::Store, T::EthSpec>(store.clone()) {
             libp2p.swarm.add_enr(enr);
         }
 
@@ -135,7 +135,7 @@ fn spawn_service<T: BeaconChainTypes>(
                 "Number of peers" => format!("{}", enrs.len()),
             );
 
-            match persist_dht::<T>(service.store.clone(), enrs) {
+            match persist_dht::<T::Store, T::EthSpec>(service.store.clone(), enrs) {
                 Err(e) => error!(
                     log,
                     "Failed to persist DHT on drop";
diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs
index c77cd064b..90a6170db 100644
--- a/beacon_node/network/src/service/tests.rs
+++ b/beacon_node/network/src/service/tests.rs
@@ -2,7 +2,7 @@
 #[cfg(test)]
 mod tests {
     use crate::persisted_dht::load_dht;
-    use crate::{NetworkConfig, Service};
+    use crate::{NetworkConfig, NetworkService};
     use beacon_chain::test_utils::BeaconChainHarness;
     use eth2_libp2p::Enr;
     use futures::{Future, IntoFuture};
@@ -10,7 +10,6 @@ mod tests {
     use sloggers::{null::NullLoggerBuilder, Build};
     use std::str::FromStr;
     use std::sync::Arc;
-    use store::MemoryStore;
     use tokio::runtime::Runtime;
     use types::{test_utils::generate_deterministic_keypairs, MinimalEthSpec};
 
@@ -44,14 +43,14 @@ mod tests {
             .block_on_all(
                 // Create a new network service which implicitly gets dropped at the
                 // end of the block.
-                Service::new(beacon_chain.clone(), &config, &executor, log.clone())
+                NetworkService::start(beacon_chain.clone(), &config, &executor, log.clone())
                     .into_future()
-                    .and_then(move |(_service, _)| Ok(())),
+                    .and_then(move |(_globals, _service, _exit)| Ok(())),
             )
             .unwrap();
 
         // Load the persisted dht from the store
-        let persisted_enrs = load_dht::<MemoryStore<MinimalEthSpec>, MinimalEthSpec>(store);
+        let persisted_enrs = load_dht(store);
         assert!(
             persisted_enrs.contains(&enrs[0]),
             "should have persisted the first ENR to store"
diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs
index 99a81fbad..4c809efe6 100644
--- a/eth2/operation_pool/src/lib.rs
+++ b/eth2/operation_pool/src/lib.rs
@@ -669,11 +669,16 @@ mod release_tests {
                     spec,
                     None,
                 );
-                op_pool.insert_attestation(att, &state.fork, spec).unwrap();
+                op_pool
+                    .insert_aggregate_attestation(att, &state.fork, spec)
+                    .unwrap();
             }
         }
 
-        assert_eq!(op_pool.attestations.read().len(), committees.len());
+        assert_eq!(
+            op_pool.aggregate_attestations.read().len(),
+            committees.len()
+        );
         assert_eq!(op_pool.num_attestations(), committees.len());
 
         // Before the min attestation inclusion delay, get_attestations shouldn't return anything.
@@ -701,13 +706,15 @@ mod release_tests {
         );
 
         // Prune attestations shouldn't do anything at this point.
-        op_pool.prune_attestations(state);
+        let epoch = state.slot.epoch(MainnetEthSpec::slots_per_epoch());
+        op_pool.prune_attestations(&epoch);
         assert_eq!(op_pool.num_attestations(), committees.len());
 
         // But once we advance to more than an epoch after the attestation, it should prune it
         // out of existence.
         state.slot += 2 * MainnetEthSpec::slots_per_epoch();
-        op_pool.prune_attestations(state);
+        let epoch = state.slot.epoch(MainnetEthSpec::slots_per_epoch());
+        op_pool.prune_attestations(&epoch);
         assert_eq!(op_pool.num_attestations(), 0);
     }
 
@@ -738,9 +745,11 @@ mod release_tests {
                 None,
             );
             op_pool
-                .insert_attestation(att.clone(), &state.fork, spec)
+                .insert_aggregate_attestation(att.clone(), &state.fork, spec)
+                .unwrap();
+            op_pool
+                .insert_aggregate_attestation(att, &state.fork, spec)
                 .unwrap();
-            op_pool.insert_attestation(att, &state.fork, spec).unwrap();
         }
 
         assert_eq!(op_pool.num_attestations(), committees.len());
@@ -777,13 +786,18 @@ mod release_tests {
                     spec,
                     None,
                 );
-                op_pool.insert_attestation(att, &state.fork, spec).unwrap();
+                op_pool
+                    .insert_aggregate_attestation(att, &state.fork, spec)
+                    .unwrap();
             }
         }
 
         // The attestations should get aggregated into two attestations that comprise all
         // validators.
-        assert_eq!(op_pool.attestations.read().len(), committees.len());
+        assert_eq!(
+            op_pool.aggregate_attestations.read().len(),
+            committees.len()
+        );
         assert_eq!(op_pool.num_attestations(), 2 * committees.len());
     }
 
@@ -825,7 +839,9 @@ mod release_tests {
                     spec,
                     if i == 0 { None } else { Some(0) },
                 );
-                op_pool.insert_attestation(att, &state.fork, spec).unwrap();
+                op_pool
+                    .insert_aggregate_attestation(att, &state.fork, spec)
+                    .unwrap();
             }
         };
 
@@ -840,7 +856,10 @@ mod release_tests {
         let num_small = target_committee_size / small_step_size;
         let num_big = target_committee_size / big_step_size;
 
-        assert_eq!(op_pool.attestations.read().len(), committees.len());
+        assert_eq!(
+            op_pool.aggregate_attestations.read().len(),
+            committees.len()
+        );
         assert_eq!(
             op_pool.num_attestations(),
             (num_small + num_big) * committees.len()
@@ -898,7 +917,9 @@ mod release_tests {
                     spec,
                     if i == 0 { None } else { Some(0) },
                 );
-                op_pool.insert_attestation(att, &state.fork, spec).unwrap();
+                op_pool
+                    .insert_aggregate_attestation(att, &state.fork, spec)
+                    .unwrap();
             }
         };
 
@@ -913,7 +934,10 @@ mod release_tests {
         let num_small = target_committee_size / small_step_size;
         let num_big = target_committee_size / big_step_size;
 
-        assert_eq!(op_pool.attestations.read().len(), committees.len());
+        assert_eq!(
+            op_pool.aggregate_attestations.read().len(),
+            committees.len()
+        );
         assert_eq!(
             op_pool.num_attestations(),
             (num_small + num_big) * committees.len()