Fix clippy errors on tests (#2160)

## Issue Addressed

There are some clippy errors in the tests.


## Proposed Changes

Enable the clippy check on tests and fix the resulting errors. 💪
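The `lint` target in the Makefile now runs `cargo clippy --all --tests -- -D warnings`, so lints in test code are denied along with the rest. Below is a minimal, self-contained Rust sketch of the recurring patterns fixed in this commit; the struct, values, and lint names are illustrative (my reading of the changes), not code taken from the diff:

```rust
use std::collections::HashMap;

#[derive(Default)]
struct Config {
    port: u16,
    verbose: bool,
}

fn main() {
    // clippy::field_reassign_with_default: build the struct in one expression
    // instead of mutating a value created with `Default::default()`.
    let config = Config {
        port: 9000,
        ..Default::default()
    };
    assert!(!config.verbose);

    // clippy::from_iter_instead_of_collect: prefer `.collect()` over
    // `HashMap::from_iter(iter)`.
    let votes: HashMap<u64, u64> = (0..4_u64).map(|i| (i, i * 2)).collect();
    assert_eq!(votes.len(), 4);

    // clippy::stable_sort_primitive: `sort_unstable` is preferred for primitives.
    let mut attesting_indices = vec![3_u64, 1, 2];
    attesting_indices.sort_unstable();

    // clippy::assign_op_pattern: use `+=` rather than `x = x + 1`.
    let mut discover_peer_count = 0;
    discover_peer_count += 1;

    println!("{} {} {}", config.port, attesting_indices[0], discover_peer_count);
}
```

With warnings denied (`-D warnings`), `make lint` now fails on these patterns in test code as well.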
Akihito Nakano 2021-01-28 23:31:06 +00:00
parent e4b62139d7
commit 1a22a096c6
36 changed files with 513 additions and 516 deletions

View File

@ -119,7 +119,7 @@ test-full: cargo-fmt test-release test-debug test-ef
 # Lints the code for bad style and potentially unsafe arithmetic using Clippy.
 # Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints.
 lint:
-	cargo clippy --all -- -D warnings
+	cargo clippy --all --tests -- -D warnings
 # Runs the makefile in the `ef_tests` repo.
 #

View File

@ -668,7 +668,6 @@ fn is_candidate_block(block: &Eth1Block, period_start: u64, spec: &ChainSpec) ->
 mod test {
     use super::*;
     use environment::null_logger;
-    use std::iter::FromIterator;
     use types::{test_utils::DepositTestTask, MinimalEthSpec};
     type E = MinimalEthSpec;
@ -1042,10 +1041,7 @@ mod test {
             let votes_to_consider = get_eth1_data_vec(slots, 0);
-            let votes = collect_valid_votes(
-                &state,
-                &HashMap::from_iter(votes_to_consider.clone().into_iter()),
-            );
+            let votes = collect_valid_votes(&state, &votes_to_consider.into_iter().collect());
             assert_eq!(
                 votes.len(),
                 0,
@ -1068,10 +1064,8 @@ mod test {
                 .collect::<Vec<_>>()
                 .into();
-            let votes = collect_valid_votes(
-                &state,
-                &HashMap::from_iter(votes_to_consider.clone().into_iter()),
-            );
+            let votes =
+                collect_valid_votes(&state, &votes_to_consider.clone().into_iter().collect());
             assert_votes!(
                 votes,
                 votes_to_consider[0..slots as usize / 4].to_vec(),
@ -1099,10 +1093,7 @@ mod test {
                 .collect::<Vec<_>>()
                 .into();
-            let votes = collect_valid_votes(
-                &state,
-                &HashMap::from_iter(votes_to_consider.clone().into_iter()),
-            );
+            let votes = collect_valid_votes(&state, &votes_to_consider.into_iter().collect());
             assert_votes!(
                 votes,
                 // There should only be one value if there's a duplicate
@ -1150,8 +1141,7 @@ mod test {
         assert_eq!(
             // Favour the highest block number when there are no votes.
             vote_data(&no_votes[2]),
-            find_winning_vote(Eth1DataVoteCount::from_iter(no_votes.into_iter()))
-                .expect("should find winner")
+            find_winning_vote(no_votes.into_iter().collect()).expect("should find winner")
         );
     }
@ -1162,8 +1152,7 @@ mod test {
         assert_eq!(
             // Favour the highest block number when there are equal votes.
             vote_data(&votes[2]),
-            find_winning_vote(Eth1DataVoteCount::from_iter(votes.into_iter()))
-                .expect("should find winner")
+            find_winning_vote(votes.into_iter().collect()).expect("should find winner")
         );
     }
@ -1174,8 +1163,7 @@ mod test {
         assert_eq!(
             // Favour the highest vote over the highest block number.
             vote_data(&votes[3]),
-            find_winning_vote(Eth1DataVoteCount::from_iter(votes.into_iter()))
-                .expect("should find winner")
+            find_winning_vote(votes.into_iter().collect()).expect("should find winner")
        );
    }
@ -1186,8 +1174,7 @@ mod test {
        assert_eq!(
            // Favour the highest block number for tying votes.
            vote_data(&votes[3]),
-            find_winning_vote(Eth1DataVoteCount::from_iter(votes.into_iter()))
-                .expect("should find winner")
+            find_winning_vote(votes.into_iter().collect()).expect("should find winner")
        );
    }
@ -1198,8 +1185,7 @@ mod test {
        assert_eq!(
            // Favour the highest block number for tying votes.
            vote_data(&votes[0]),
-            find_winning_vote(Eth1DataVoteCount::from_iter(votes.into_iter()))
-                .expect("should find winner")
+            find_winning_vote(votes.into_iter().collect()).expect("should find winner")
        );
    }
 }

View File

@ -238,6 +238,7 @@ mod test {
         builder.build()
     }
+    #[allow(clippy::needless_range_loop)]
     fn check_cache_get(cache: &ValidatorPubkeyCache, keypairs: &[Keypair]) {
         let validator_count = keypairs.len();

View File

@ -160,7 +160,6 @@ fn ensure_dir_exists(path: PathBuf) -> Result<PathBuf, String> {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use toml;
     #[test]
     fn serde() {

View File

@ -397,7 +397,7 @@ pub mod tests {
             .expect("should get the four from the full tree");
         assert_eq!(
             deposits.len(),
-            4 as usize,
+            4_usize,
             "should get 4 deposits from full tree"
         );
         assert_eq!(
@ -418,7 +418,7 @@ pub mod tests {
             .expect("should get the half tree");
         assert_eq!(
             deposits.len(),
-            4 as usize,
+            4_usize,
             "should get 4 deposits from half tree"
         );
         assert_eq!(

View File

@ -1179,7 +1179,6 @@ async fn download_eth1_block(
 #[cfg(test)]
 mod tests {
     use super::*;
-    use toml;
     use types::MainnetEthSpec;
     #[test]

View File

@ -964,8 +964,10 @@ mod tests {
     async fn build_discovery() -> Discovery<E> {
         let keypair = libp2p::identity::Keypair::generate_secp256k1();
-        let mut config = NetworkConfig::default();
-        config.discovery_port = unused_port();
+        let config = NetworkConfig {
+            discovery_port: unused_port(),
+            ..Default::default()
+        };
         let enr_key: CombinedKey = CombinedKey::from_libp2p(&keypair).unwrap();
         let enr: Enr = build_enr::<E>(&enr_key, &config, EnrForkId::default()).unwrap();
         let log = build_log(slog::Level::Debug, false);
@ -1055,7 +1057,7 @@ mod tests {
         discovery.queued_queries.push_back(QueryType::FindPeers);
         discovery
             .queued_queries
-            .push_back(QueryType::Subnet(subnet_query.clone()));
+            .push_back(QueryType::Subnet(subnet_query));
         // Process Subnet query and FindPeers afterwards.
         assert!(discovery.process_queue());
     }
@ -1101,7 +1103,7 @@ mod tests {
         // Unwanted enr for the given grouped query
         let enr3 = make_enr(vec![3]);
-        let enrs: Vec<Enr> = vec![enr1.clone(), enr2.clone(), enr3.clone()];
+        let enrs: Vec<Enr> = vec![enr1.clone(), enr2, enr3];
         let results = discovery
             .process_completed_queries(QueryResult(query, Ok(enrs)))
             .unwrap();

View File

@ -645,6 +645,7 @@ mod tests {
     }
     #[test]
+    #[allow(clippy::float_cmp)]
     fn test_peer_connected_successfully() {
         let mut pdb = get_db();
         let random_peer = PeerId::random();
@ -745,7 +746,7 @@ mod tests {
         assert!(the_best.is_some());
         // Consistency check
         let best_peers = pdb.best_peers_by_status(PeerInfo::is_connected);
-        assert_eq!(the_best, best_peers.iter().next().map(|p| p.0));
+        assert_eq!(the_best.unwrap(), best_peers.get(0).unwrap().0);
     }
     #[test]
@ -839,7 +840,7 @@ mod tests {
         pdb.notify_disconnect(&random_peer2);
         pdb.disconnect_and_ban(&random_peer3);
         pdb.notify_disconnect(&random_peer3);
-        pdb.connect_ingoing(&random_peer, multiaddr.clone(), None);
+        pdb.connect_ingoing(&random_peer, multiaddr, None);
         assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
         assert_eq!(
             pdb.banned_peers_count.banned_peers(),
@ -1021,10 +1022,11 @@ mod tests {
     }
     #[test]
+    #[allow(clippy::float_cmp)]
     fn test_trusted_peers_score() {
         let trusted_peer = PeerId::random();
         let log = build_log(slog::Level::Debug, false);
-        let mut pdb: PeerDB<M> = PeerDB::new(vec![trusted_peer.clone()], &log);
+        let mut pdb: PeerDB<M> = PeerDB::new(vec![trusted_peer], &log);
         pdb.connect_ingoing(&trusted_peer, "/ip4/0.0.0.0".parse().unwrap(), None);

View File

@ -346,6 +346,7 @@ mod tests {
     use super::*;
     #[test]
+    #[allow(clippy::float_cmp)]
     fn test_reputation_change() {
         let mut score = Score::default();
@ -375,6 +376,7 @@ mod tests {
     }
     #[test]
+    #[allow(clippy::float_cmp)]
     fn test_ban_time() {
         let mut score = RealScore::default();
         let now = Instant::now();
@ -402,6 +404,7 @@ mod tests {
     }
     #[test]
+    #[allow(clippy::float_cmp)]
     fn test_ignored_gossipsub_score() {
         let mut score = Score::default();
         score.update_gossipsub_score(GOSSIPSUB_GREYLIST_THRESHOLD, true);

View File

@ -126,8 +126,7 @@ pub async fn build_libp2p_instance(
 #[allow(dead_code)]
 pub fn get_enr(node: &LibP2PService<E>) -> Enr {
-    let enr = node.swarm.local_enr().clone();
-    enr
+    node.swarm.local_enr()
 }
 // Returns `n` libp2p peers in fully connected topology.

View File

@ -17,6 +17,7 @@ type E = MinimalEthSpec;
 // Tests the STATUS RPC message
 #[test]
+#[allow(clippy::single_match)]
 fn test_status_rpc() {
     // set up the logging. The level and enabled logging or not
     let log_level = Level::Debug;
@ -113,6 +114,7 @@ fn test_status_rpc() {
 // Tests a streamed BlocksByRange RPC Message
 #[test]
+#[allow(clippy::single_match)]
 fn test_blocks_by_range_chunked_rpc() {
     // set up the logging. The level and enabled logging or not
     let log_level = Level::Trace;
@ -199,7 +201,7 @@ fn test_blocks_by_range_chunked_rpc() {
                 warn!(log, "Receiver got request");
                 for _ in 1..=messages_to_send {
                     receiver.swarm.send_successful_response(
-                        peer_id.clone(),
+                        peer_id,
                         id,
                         rpc_response.clone(),
                     );
@ -340,8 +342,8 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() {
                     messages_sent += 1;
                     let (peer_id, stream_id) = message_info.as_ref().unwrap();
                     receiver.swarm.send_successful_response(
-                        peer_id.clone(),
-                        stream_id.clone(),
+                        *peer_id,
+                        *stream_id,
                         rpc_response.clone(),
                     );
                     debug!(log, "Sending message {}", messages_sent);
@ -365,6 +367,7 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() {
 // Tests an empty response to a BlocksByRange RPC Message
 #[test]
+#[allow(clippy::single_match)]
 fn test_blocks_by_range_single_empty_rpc() {
     // set up the logging. The level and enabled logging or not
     let log_level = Level::Trace;
@ -448,7 +451,7 @@ fn test_blocks_by_range_single_empty_rpc() {
                 for _ in 1..=messages_to_send {
                     receiver.swarm.send_successful_response(
-                        peer_id.clone(),
+                        peer_id,
                         id,
                         rpc_response.clone(),
                     );
@ -480,6 +483,7 @@ fn test_blocks_by_range_single_empty_rpc() {
 // which is greater than the Snappy frame size. Hence, this test
 // serves to test the snappy framing format as well.
 #[test]
+#[allow(clippy::single_match)]
 fn test_blocks_by_root_chunked_rpc() {
     // set up the logging. The level and enabled logging or not
     let log_level = Level::Debug;
@ -565,7 +569,7 @@ fn test_blocks_by_root_chunked_rpc() {
                 for _ in 1..=messages_to_send {
                     receiver.swarm.send_successful_response(
-                        peer_id.clone(),
+                        peer_id,
                         id,
                         rpc_response.clone(),
                     );
@ -715,8 +719,8 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
                     messages_sent += 1;
                     let (peer_id, stream_id) = message_info.as_ref().unwrap();
                     receiver.swarm.send_successful_response(
-                        peer_id.clone(),
-                        stream_id.clone(),
+                        *peer_id,
+                        *stream_id,
                         rpc_response.clone(),
                     );
                     debug!(log, "Sending message {}", messages_sent);
@ -740,6 +744,7 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
 // Tests a Goodbye RPC message
 #[test]
+#[allow(clippy::single_match)]
 fn test_goodbye_rpc() {
     // set up the logging. The level and enabled logging or not
     let log_level = Level::Trace;

View File

@ -20,6 +20,7 @@ use types::{Attestation, EthSpec, Slot, SubnetId, ValidatorSubscription};
 use crate::metrics;
+#[cfg(test)]
 mod tests;
 /// The minimum number of slots ahead that we attempt to discover peers for a subscription. If the

View File

@ -1,432 +1,424 @@
-#[cfg(test)]
-mod tests {
-    use super::super::*;
+use super::*;
 use beacon_chain::{
     builder::{BeaconChainBuilder, Witness},
     eth1_chain::CachingEth1Backend,
 };
 use futures::Stream;
 use genesis::{generate_deterministic_keypairs, interop_genesis_state};
 use lazy_static::lazy_static;
 use matches::assert_matches;
 use slog::Logger;
 use sloggers::{null::NullLoggerBuilder, Build};
 use slot_clock::{SlotClock, SystemTimeSlotClock};
 use std::time::{Duration, SystemTime};
 use store::config::StoreConfig;
 use store::{HotColdDB, MemoryStore};
 use tempfile::tempdir;
 use types::{CommitteeIndex, EthSpec, MinimalEthSpec};

(The rest of this file — the `TestBeaconChain` helpers, `recent_genesis_time`, `get_logger`, `get_attestation_service`, `get_subscription(s)`, `get_events`, and the tests `subscribe_current_slot_wait_for_unsubscribe`, `test_same_subnet_unsubscription`, `subscribe_all_random_subnets` and `subscribe_all_random_subnets_plus_one` — is unchanged apart from being de-indented one level, with a few long lines re-wrapped, now that the inline `#[cfg(test)] mod tests { ... }` wrapper is gone; the `#[cfg(test)]` attribute instead lives on the `mod tests;` declaration shown in the previous file. The only other code change is replacing `x = x + 1` with `x += 1` in the event-counting match arms of the two `subscribe_all_random_subnets*` tests:)

-            AttServiceMessage::DiscoverPeers(_) => {
-                discover_peer_count = discover_peer_count + 1
-            }
-            AttServiceMessage::Subscribe(_any_subnet) => {}
-            AttServiceMessage::EnrAdd(_any_subnet) => enr_add_count = enr_add_count + 1,
-            _ => unexpected_msg_count = unexpected_msg_count + 1,
+            AttServiceMessage::DiscoverPeers(_) => discover_peer_count += 1,
+            AttServiceMessage::Subscribe(_any_subnet) => {}
+            AttServiceMessage::EnrAdd(_any_subnet) => enr_add_count += 1,
+            _ => unexpected_msg_count += 1,

View File

@ -432,7 +432,7 @@ mod test {
         let state_b_root = Hash256::from_low_u64_be(slots_per_historical_root as u64 * 2);
         store.put_state(&state_a_root, &state_a).unwrap();
-        store.put_state(&state_b_root, &state_b.clone()).unwrap();
+        store.put_state(&state_b_root, &state_b).unwrap();
         let iter = StateRootsIterator::new(store, &state_b);

View File

@ -202,36 +202,24 @@ mod test {
     #[test]
     fn test_strip_off() {
-        let expected = "hello world".as_bytes().to_vec();
-        assert_eq!(
-            strip_off_newlines("hello world\n".as_bytes().to_vec()),
-            expected
-        );
-        assert_eq!(
-            strip_off_newlines("hello world\n\n\n\n".as_bytes().to_vec()),
-            expected
-        );
-        assert_eq!(
-            strip_off_newlines("hello world\r".as_bytes().to_vec()),
-            expected
-        );
-        assert_eq!(
-            strip_off_newlines("hello world\r\r\r\r\r".as_bytes().to_vec()),
-            expected
-        );
-        assert_eq!(
-            strip_off_newlines("hello world\r\n".as_bytes().to_vec()),
-            expected
-        );
-        assert_eq!(
-            strip_off_newlines("hello world\r\n\r\n".as_bytes().to_vec()),
-            expected
-        );
-        assert_eq!(
-            strip_off_newlines("hello world".as_bytes().to_vec()),
-            expected
-        );
+        let expected = b"hello world".to_vec();
+        assert_eq!(strip_off_newlines(b"hello world\n".to_vec()), expected);
+        assert_eq!(
+            strip_off_newlines(b"hello world\n\n\n\n".to_vec()),
+            expected
+        );
+        assert_eq!(strip_off_newlines(b"hello world\r".to_vec()), expected);
+        assert_eq!(
+            strip_off_newlines(b"hello world\r\r\r\r\r".to_vec()),
+            expected
+        );
+        assert_eq!(strip_off_newlines(b"hello world\r\n".to_vec()), expected);
+        assert_eq!(
+            strip_off_newlines(b"hello world\r\n\r\n".to_vec()),
+            expected
+        );
+        assert_eq!(strip_off_newlines(b"hello world".to_vec()), expected);
     }
     #[test]

View File

@ -247,8 +247,8 @@ mod tests {
     #[test]
     fn hard_coded_nets_work() {
         for net in HARDCODED_NETS {
-            let config =
-                Eth2NetworkConfig::from_hardcoded_net(net).expect(&format!("{:?}", net.name));
+            let config = Eth2NetworkConfig::from_hardcoded_net(net)
+                .unwrap_or_else(|_| panic!("{:?}", net.name));
             if net.name == "mainnet" || net.name == "toledo" || net.name == "pyrmont" {
                 // Ensure we can parse the YAML config to a chain spec.

View File

@ -110,7 +110,7 @@ mod test {
         let _lockfile = File::create(&path).unwrap();
-        let lock = Lockfile::new(path.clone()).unwrap();
+        let lock = Lockfile::new(path).unwrap();
         assert!(lock.file_existed());
     }

View File

@ -78,7 +78,6 @@ pub fn int_to_bytes96(int: u64) -> Vec<u8> {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use hex;
     use std::{fs::File, io::prelude::*, path::PathBuf};
     use yaml_rust::yaml;

View File

@ -386,7 +386,7 @@ mod test_compute_deltas {
             state_root,
             target_root: finalized_root,
             current_epoch_shuffling_id: junk_shuffling_id.clone(),
-            next_epoch_shuffling_id: junk_shuffling_id.clone(),
+            next_epoch_shuffling_id: junk_shuffling_id,
             justified_epoch: genesis_epoch,
             finalized_epoch: genesis_epoch,
         })

View File

@ -83,6 +83,7 @@ mod round_trip {
     }
     #[test]
+    #[allow(clippy::zero_prefixed_literal)]
     fn fixed_len_struct_encoding() {
         let items: Vec<FixedLen> = vec![
             FixedLen { a: 0, b: 0, c: 0 },
@ -142,6 +143,7 @@ mod round_trip {
     }
     #[test]
+    #[allow(clippy::zero_prefixed_literal)]
     fn offset_into_fixed_bytes() {
         let bytes = vec![
             //  1   2   3   4   5   6   7   8   9   10  11  12  13  14  15
@ -172,6 +174,7 @@ mod round_trip {
     }
     #[test]
+    #[allow(clippy::zero_prefixed_literal)]
     fn first_offset_skips_byte() {
         let bytes = vec![
             //  1   2   3   4   5   6   7   8   9   10  11  12  13  14  15
@ -186,6 +189,7 @@ mod round_trip {
     }
     #[test]
+    #[allow(clippy::zero_prefixed_literal)]
     fn variable_len_struct_encoding() {
         let items: Vec<VariableLen> = vec![
             VariableLen {
@ -274,6 +278,7 @@ mod round_trip {
     }
     #[test]
+    #[allow(clippy::zero_prefixed_literal)]
     fn offsets_decreasing() {
         let bytes = vec![
             //  1   2   3   4   5   6   7   8   9   10  11  12  13  14  15
@ -296,6 +301,7 @@ mod round_trip {
     }
     #[test]
+    #[allow(clippy::zero_prefixed_literal)]
     fn two_variable_len_options_encoding() {
         let s = TwoVariableLenOptions {
             a: 42,

View File

@ -777,17 +777,17 @@ mod bitlist {
     fn ssz_encode() {
         assert_eq!(
             BitList0::with_capacity(0).unwrap().as_ssz_bytes(),
-            vec![0b0000_00001],
+            vec![0b0000_0001],
         );
         assert_eq!(
             BitList1::with_capacity(0).unwrap().as_ssz_bytes(),
-            vec![0b0000_00001],
+            vec![0b0000_0001],
         );
         assert_eq!(
             BitList1::with_capacity(1).unwrap().as_ssz_bytes(),
-            vec![0b0000_00010],
+            vec![0b0000_0010],
         );
         assert_eq!(

View File

@ -387,7 +387,7 @@ fn invalid_attestation_wrong_justified_checkpoint() {
             index: 0,
             reason: AttestationInvalid::WrongJustifiedCheckpoint {
                 state: Checkpoint {
-                    epoch: Epoch::from(2 as u64),
+                    epoch: Epoch::from(2_u64),
                     root: Hash256::zero(),
                 },
                 attestation: Checkpoint {
@ -878,7 +878,7 @@ fn invalid_proposer_slashing_proposal_epoch_mismatch() {
             index: 0,
             reason: ProposerSlashingInvalid::ProposalSlotMismatch(
                 Slot::from(0_u64),
-                Slot::from(128 as u64)
+                Slot::from(128_u64)
             )
         })
     );

View File

@ -181,6 +181,7 @@ mod tests {
     }
     #[test]
+    #[allow(clippy::assertions_on_constants)]
     fn sanity_check_constants() {
         assert!(TOTAL_SIZE > SEED_SIZE);
         assert!(TOTAL_SIZE > PIVOT_VIEW_SIZE);

View File

@ -397,7 +397,7 @@ mod test {
         let merklizer_root_individual_3_bytes = {
             let mut m = MerkleHasher::with_depth(depth);
-            for bytes in reference_bytes.clone().chunks(3) {
+            for bytes in reference_bytes.chunks(3) {
                 m.write(bytes).expect("should process byte");
             }
             m.finish().expect("should finish")
@ -426,7 +426,7 @@ mod test {
     /// of leaves and a depth.
     fn compare_reference_with_len(leaves: u64, depth: usize) {
         let leaves = (0..leaves)
-            .map(|i| Hash256::from_low_u64_be(i))
+            .map(Hash256::from_low_u64_be)
             .collect::<Vec<_>>();
         compare_with_reference(&leaves, depth)
     }
@ -435,7 +435,7 @@ mod test {
     /// results.
     fn compare_new_with_leaf_count(num_leaves: u64, depth: usize) {
         let leaves = (0..num_leaves)
-            .map(|i| Hash256::from_low_u64_be(i))
+            .map(Hash256::from_low_u64_be)
             .collect::<Vec<_>>();
         let from_depth = {

View File

@ -1,6 +1,7 @@
 #![cfg(test)]
 use super::*;
 use crate::test_utils::*;
+use std::ops::Mul;
 ssz_and_tree_hash_tests!(FoundationBeaconState);
@ -49,13 +50,13 @@ fn test_beacon_proposer_index<T: EthSpec>() {
     // Test where we have two validators per slot.
     // 0th candidate should be chosen every time.
-    let state = build_state(T::slots_per_epoch() as usize * 2);
+    let state = build_state((T::slots_per_epoch() as usize).mul(2));
     for i in 0..T::slots_per_epoch() {
         test(&state, Slot::from(i), 0);
     }
     // Test with two validators per slot, first validator has zero balance.
-    let mut state = build_state(T::slots_per_epoch() as usize * 2);
+    let mut state = build_state((T::slots_per_epoch() as usize).mul(2));
     let slot0_candidate0 = ith_candidate(&state, Slot::new(0), 0, &spec);
     state.validators[slot0_candidate0].effective_balance = 0;
     test(&state, Slot::new(0), 1);
@ -74,8 +75,8 @@ fn beacon_proposer_index() {
 /// 1. Using the cache before it's built fails.
 /// 2. Using the cache after it's build passes.
 /// 3. Using the cache after it's dropped fails.
-fn test_cache_initialization<'a, T: EthSpec>(
-    state: &'a mut BeaconState<T>,
+fn test_cache_initialization<T: EthSpec>(
+    state: &mut BeaconState<T>,
     relative_epoch: RelativeEpoch,
     spec: &ChainSpec,
 ) {
@ -126,7 +127,7 @@ fn cache_initialization() {
 }
 fn test_clone_config<E: EthSpec>(base_state: &BeaconState<E>, clone_config: CloneConfig) {
-    let state = base_state.clone_with(clone_config.clone());
+    let state = base_state.clone_with(clone_config);
     if clone_config.committee_caches {
         state
             .committee_cache(RelativeEpoch::Previous)
@ -260,6 +261,7 @@ fn tree_hash_cache() {
 mod committees {
     use super::*;
     use crate::beacon_state::MinimalEthSpec;
+    use std::ops::{Add, Div};
     use swap_or_not_shuffle::shuffle_list;
     fn execute_committee_consistency_test<T: EthSpec>(
@ -295,7 +297,10 @@ mod committees {
         // of committees in an epoch.
         assert_eq!(
             beacon_committees.len() as u64,
-            state.get_epoch_committee_count(relative_epoch).unwrap() / T::slots_per_epoch()
+            state
+                .get_epoch_committee_count(relative_epoch)
+                .unwrap()
+                .div(T::slots_per_epoch())
         );
         for (committee_index, bc) in beacon_committees.iter().enumerate() {
@ -372,10 +377,11 @@ mod committees {
     fn committee_consistency_test_suite<T: EthSpec>(cached_epoch: RelativeEpoch) {
         let spec = T::default_spec();
-        let validator_count = spec.max_committees_per_slot
-            * T::slots_per_epoch() as usize
-            * spec.target_committee_size
-            + 1;
+        let validator_count = spec
+            .max_committees_per_slot
+            .mul(T::slots_per_epoch() as usize)
+            .mul(spec.target_committee_size)
+            .add(1);
         committee_consistency_test::<T>(validator_count as usize, Epoch::new(0), cached_epoch);
@ -387,7 +393,10 @@ mod committees {
         committee_consistency_test::<T>(
             validator_count as usize,
-            T::genesis_epoch() + T::slots_per_historical_root() as u64 * T::slots_per_epoch() * 4,
+            T::genesis_epoch()
+                + (T::slots_per_historical_root() as u64)
+                    .mul(T::slots_per_epoch())
+                    .mul(4),
             cached_epoch,
         );
     }

View File

@ -405,6 +405,7 @@ mod tests {
         let _ = ChainSpec::mainnet();
     }
+    #[allow(clippy::useless_vec)]
     fn test_domain(domain_type: Domain, raw_domain: u32, spec: &ChainSpec) {
         let previous_version = [0, 0, 0, 1];
         let current_version = [0, 0, 0, 2];

View File

@ -137,9 +137,11 @@ mod test {
     #[test]
     fn zeroed_validator() {
-        let mut v = Validator::default();
-        v.activation_eligibility_epoch = Epoch::from(0u64);
-        v.activation_epoch = Epoch::from(0u64);
+        let v = Validator {
+            activation_eligibility_epoch: Epoch::from(0u64),
+            activation_epoch: Epoch::from(0u64),
+            ..Default::default()
+        };
         test_validator_tree_hash(&v);
     }
@ -153,6 +155,7 @@ mod test {
     }
     #[test]
+    #[allow(clippy::assertions_on_constants)]
     pub fn smallvec_size_check() {
         // If this test fails we need to go and reassess the length of the `SmallVec` in
         // `cached_tree_hash::TreeHashCache`. If the size of the `SmallVec` is too slow we're going

View File

@ -135,7 +135,7 @@ pub mod tests_commons {
     pub fn assert_backend_new_error(matches: &ArgMatches, error_msg: &str) {
         match Backend::new(matches, &get_null_logger()) {
             Ok(_) => panic!("This invocation to Backend::new() should return error"),
-            Err(e) => assert_eq!(e.to_string(), error_msg),
+            Err(e) => assert_eq!(e, error_msg),
         }
     }
 }
@ -188,7 +188,7 @@ pub mod backend_new {
         match result {
             Ok(_) => panic!("This invocation to Backend::new() should return error"),
-            Err(e) => assert_eq!(e.to_string(), "Storage Raw Dir: PermissionDenied",),
+            Err(e) => assert_eq!(e, "Storage Raw Dir: PermissionDenied",),
         }
     }

View File

@ -107,7 +107,7 @@ fn surrounds_existing_single_val_single_chunk() {
 fn surrounds_existing_multi_vals_single_chunk() {
     let validators = vec![0, 16, 1024, 300_000, 300_001];
     let att1 = indexed_att(validators.clone(), 1, 2, 0);
-    let att2 = indexed_att(validators.clone(), 0, 3, 0);
+    let att2 = indexed_att(validators, 0, 3, 0);
     let slashings = hashset![att_slashing(&att2, &att1)];
     slasher_test_indiv(&[att1, att2], &slashings, 3);
 }

View File

@ -9,7 +9,7 @@ use types::{Epoch, EthSpec};
 fn empty_pruning() {
     let tempdir = tempdir().unwrap();
     let config = Config::new(tempdir.path().into());
-    let slasher = Slasher::<E>::open(config.clone(), logger()).unwrap();
+    let slasher = Slasher::<E>::open(config, logger()).unwrap();
     slasher.prune_database(Epoch::new(0)).unwrap();
 }

View File

@ -62,7 +62,7 @@ fn random_test(seed: u64, test_config: TestConfig) {
         .choose_multiple(&mut rng, num_attesters)
         .copied()
         .collect::<Vec<u64>>();
-    attesting_indices.sort();
+    attesting_indices.sort_unstable();
     // If checking slashings, generate valid attestations in range.
     let (source, target) = if check_slashings {

View File

@ -30,7 +30,7 @@ fn attestation_pruning_empty_wrap_around() {
     // Add an attestation that would be surrounded with the modulo considered
     slasher.accept_attestation(indexed_att(
-        v.clone(),
+        v,
         2 * history_length - 3,
         2 * history_length - 2,
         1,
@ -48,7 +48,7 @@ fn pruning_with_map_full() {
     config.history_length = 1024;
     config.max_db_size_mbs = 1;
-    let slasher = Slasher::open(config.clone(), logger()).unwrap();
+    let slasher = Slasher::open(config, logger()).unwrap();
     let v = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];

View File

@ -306,7 +306,7 @@ fn invalid_surrounding_from_first_source() {
     let second = attestation_data_builder(3, 4);
     StreamTest {
         cases: vec![
-            Test::single(first.clone()),
+            Test::single(first),
             Test::single(second.clone()),
             Test::single(attestation_data_builder(2, 5)).expect_invalid_att(
                 InvalidAttestation::NewSurroundsPrev {
@ -326,8 +326,8 @@ fn invalid_surrounding_multiple_votes() {
     let third = attestation_data_builder(2, 3);
     StreamTest {
         cases: vec![
-            Test::single(first.clone()),
-            Test::single(second.clone()),
+            Test::single(first),
+            Test::single(second),
             Test::single(third.clone()),
             Test::single(attestation_data_builder(0, 4)).expect_invalid_att(
                 InvalidAttestation::NewSurroundsPrev {

View File

@ -69,7 +69,7 @@ fn valid_same_block_different_validator() {
         registered_validators: vec![pubkey(0), pubkey(1)],
         cases: vec![
             Test::with_pubkey(pubkey(0), block.clone()),
-            Test::with_pubkey(pubkey(1), block.clone()),
+            Test::with_pubkey(pubkey(1), block),
         ],
     }
     .run()

View File

@ -119,6 +119,7 @@ mod test {
     use super::*;
     #[test]
+    #[allow(clippy::eq_op)]
     fn signing_root_partial_eq() {
         let h0 = SigningRoot(Hash256::zero());
         let h1 = SigningRoot(Hash256::repeat_byte(1));

View File

@ -88,7 +88,7 @@ impl<E: EthSpec> ForkServiceBuilder<slot_clock::TestingSlotClock, E> {
             eth2::Url::parse("http://127.0.0.1").unwrap(),
         ))];
         let mut beacon_nodes = BeaconNodeFallback::new(candidates, spec, log.clone());
-        beacon_nodes.set_slot_clock(slot_clock.clone());
+        beacon_nodes.set_slot_clock(slot_clock);
         Self {
             fork: Some(types::Fork::default()),