Fix clippy errors on tests (#2160)
## Issue Addressed
There are some clippy errors in the tests.
## Proposed Changes
Enable the clippy check on tests and fix the errors. 💪
parent e4b62139d7 · commit 1a22a096c6
Makefile
```diff
@@ -119,7 +119,7 @@ test-full: cargo-fmt test-release test-debug test-ef
 # Lints the code for bad style and potentially unsafe arithmetic using Clippy.
 # Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints.
 lint:
-	cargo clippy --all -- -D warnings
+	cargo clippy --all --tests -- -D warnings

 # Runs the makefile in the `ef_tests` repo.
 #
```
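For context, the only functional change to the Makefile is the added `--tests` flag, which makes Clippy also lint code compiled under `#[cfg(test)]`. A minimal standalone sketch (not taken from this diff) of the kind of test-only code the stricter command now checks:

```rust
fn main() {}

#[cfg(test)]
mod tests {
    #[test]
    fn example() {
        // Without `--tests`, clippy never sees this function. With it, a cast
        // like `4 as usize` trips `clippy::unnecessary_cast`, and `-D warnings`
        // turns that into a hard error, so the literal is written directly.
        let n = 4_usize; // previously `4 as usize`
        assert_eq!(n, 4);
    }
}
```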
```diff
@@ -668,7 +668,6 @@ fn is_candidate_block(block: &Eth1Block, period_start: u64, spec: &ChainSpec) ->
 mod test {
     use super::*;
     use environment::null_logger;
-    use std::iter::FromIterator;
     use types::{test_utils::DepositTestTask, MinimalEthSpec};

     type E = MinimalEthSpec;
```
```diff
@@ -1042,10 +1041,7 @@ mod test {

         let votes_to_consider = get_eth1_data_vec(slots, 0);

-        let votes = collect_valid_votes(
-            &state,
-            &HashMap::from_iter(votes_to_consider.clone().into_iter()),
-        );
+        let votes = collect_valid_votes(&state, &votes_to_consider.into_iter().collect());
         assert_eq!(
             votes.len(),
             0,
```
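This pattern repeats through the eth1 vote tests below: the explicit `HashMap::from_iter(...)` (which needed the `use std::iter::FromIterator;` import removed above) becomes a plain `.collect()`. A minimal sketch of the equivalence with an ordinary `HashMap` standing in for the crate's vote-count map:

```rust
use std::collections::HashMap;
use std::iter::FromIterator;

fn main() {
    let votes = vec![(1_u64, 10_u64), (2, 20)];

    // Old style: build the map through the `FromIterator` trait explicitly.
    let a: HashMap<u64, u64> = HashMap::from_iter(votes.clone().into_iter());

    // New style: `collect()` builds the same map and needs no extra import;
    // the target type is inferred from the annotation or the call site.
    let b: HashMap<u64, u64> = votes.into_iter().collect();

    assert_eq!(a, b);
}
```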
```diff
@@ -1068,10 +1064,8 @@ mod test {
             .collect::<Vec<_>>()
             .into();

-        let votes = collect_valid_votes(
-            &state,
-            &HashMap::from_iter(votes_to_consider.clone().into_iter()),
-        );
+        let votes =
+            collect_valid_votes(&state, &votes_to_consider.clone().into_iter().collect());
         assert_votes!(
             votes,
             votes_to_consider[0..slots as usize / 4].to_vec(),
@@ -1099,10 +1093,7 @@ mod test {
             .collect::<Vec<_>>()
             .into();

-        let votes = collect_valid_votes(
-            &state,
-            &HashMap::from_iter(votes_to_consider.clone().into_iter()),
-        );
+        let votes = collect_valid_votes(&state, &votes_to_consider.into_iter().collect());
         assert_votes!(
             votes,
             // There should only be one value if there's a duplicate
@@ -1150,8 +1141,7 @@ mod test {
         assert_eq!(
             // Favour the highest block number when there are no votes.
             vote_data(&no_votes[2]),
-            find_winning_vote(Eth1DataVoteCount::from_iter(no_votes.into_iter()))
-                .expect("should find winner")
+            find_winning_vote(no_votes.into_iter().collect()).expect("should find winner")
         );
     }

@@ -1162,8 +1152,7 @@ mod test {
         assert_eq!(
             // Favour the highest block number when there are equal votes.
             vote_data(&votes[2]),
-            find_winning_vote(Eth1DataVoteCount::from_iter(votes.into_iter()))
-                .expect("should find winner")
+            find_winning_vote(votes.into_iter().collect()).expect("should find winner")
         );
     }

@@ -1174,8 +1163,7 @@ mod test {
         assert_eq!(
             // Favour the highest vote over the highest block number.
             vote_data(&votes[3]),
-            find_winning_vote(Eth1DataVoteCount::from_iter(votes.into_iter()))
-                .expect("should find winner")
+            find_winning_vote(votes.into_iter().collect()).expect("should find winner")
         );
     }

@@ -1186,8 +1174,7 @@ mod test {
         assert_eq!(
             // Favour the highest block number for tying votes.
             vote_data(&votes[3]),
-            find_winning_vote(Eth1DataVoteCount::from_iter(votes.into_iter()))
-                .expect("should find winner")
+            find_winning_vote(votes.into_iter().collect()).expect("should find winner")
         );
     }

@@ -1198,8 +1185,7 @@ mod test {
         assert_eq!(
             // Favour the highest block number for tying votes.
             vote_data(&votes[0]),
-            find_winning_vote(Eth1DataVoteCount::from_iter(votes.into_iter()))
-                .expect("should find winner")
+            find_winning_vote(votes.into_iter().collect()).expect("should find winner")
         );
     }
 }
```
```diff
@@ -238,6 +238,7 @@ mod test {
         builder.build()
     }

+    #[allow(clippy::needless_range_loop)]
     fn check_cache_get(cache: &ValidatorPubkeyCache, keypairs: &[Keypair]) {
         let validator_count = keypairs.len();

```
```diff
@@ -160,7 +160,6 @@ fn ensure_dir_exists(path: PathBuf) -> Result<PathBuf, String> {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use toml;

     #[test]
     fn serde() {
```
```diff
@@ -397,7 +397,7 @@ pub mod tests {
             .expect("should get the four from the full tree");
         assert_eq!(
             deposits.len(),
-            4 as usize,
+            4_usize,
             "should get 4 deposits from full tree"
         );
         assert_eq!(
@@ -418,7 +418,7 @@ pub mod tests {
             .expect("should get the half tree");
         assert_eq!(
             deposits.len(),
-            4 as usize,
+            4_usize,
             "should get 4 deposits from half tree"
         );
         assert_eq!(
```
```diff
@@ -1179,7 +1179,6 @@ async fn download_eth1_block(
 #[cfg(test)]
 mod tests {
     use super::*;
-    use toml;
     use types::MainnetEthSpec;

     #[test]
```
```diff
@@ -964,8 +964,10 @@ mod tests {

     async fn build_discovery() -> Discovery<E> {
         let keypair = libp2p::identity::Keypair::generate_secp256k1();
-        let mut config = NetworkConfig::default();
-        config.discovery_port = unused_port();
+        let config = NetworkConfig {
+            discovery_port: unused_port(),
+            ..Default::default()
+        };
         let enr_key: CombinedKey = CombinedKey::from_libp2p(&keypair).unwrap();
         let enr: Enr = build_enr::<E>(&enr_key, &config, EnrForkId::default()).unwrap();
         let log = build_log(slog::Level::Debug, false);
```
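The `NetworkConfig` change is the standard fix for `clippy::field_reassign_with_default`: build the value in one expression with struct update syntax instead of mutating a `default()`. A self-contained sketch with a stand-in config type (the second field name is illustrative, not a claim about the real `NetworkConfig`):

```rust
#[derive(Debug, Default, PartialEq)]
struct Config {
    discovery_port: u16,
    target_peers: usize,
}

fn main() {
    // Before: clippy::field_reassign_with_default fires on this pattern.
    let mut a = Config::default();
    a.discovery_port = 9000;

    // After: one expression, no `mut`, remaining fields taken from Default.
    let b = Config {
        discovery_port: 9000,
        ..Default::default()
    };

    assert_eq!(a, b);
}
```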
```diff
@@ -1055,7 +1057,7 @@ mod tests {
         discovery.queued_queries.push_back(QueryType::FindPeers);
         discovery
             .queued_queries
-            .push_back(QueryType::Subnet(subnet_query.clone()));
+            .push_back(QueryType::Subnet(subnet_query));
         // Process Subnet query and FindPeers afterwards.
         assert!(discovery.process_queue());
     }
@@ -1101,7 +1103,7 @@ mod tests {
         // Unwanted enr for the given grouped query
         let enr3 = make_enr(vec![3]);

-        let enrs: Vec<Enr> = vec![enr1.clone(), enr2.clone(), enr3.clone()];
+        let enrs: Vec<Enr> = vec![enr1.clone(), enr2, enr3];
         let results = discovery
             .process_completed_queries(QueryResult(query, Ok(enrs)))
             .unwrap();
```
```diff
@@ -645,6 +645,7 @@ mod tests {
     }

     #[test]
+    #[allow(clippy::float_cmp)]
     fn test_peer_connected_successfully() {
         let mut pdb = get_db();
         let random_peer = PeerId::random();
@@ -745,7 +746,7 @@ mod tests {
         assert!(the_best.is_some());
         // Consistency check
         let best_peers = pdb.best_peers_by_status(PeerInfo::is_connected);
-        assert_eq!(the_best, best_peers.iter().next().map(|p| p.0));
+        assert_eq!(the_best.unwrap(), best_peers.get(0).unwrap().0);
     }

     #[test]
@@ -839,7 +840,7 @@ mod tests {
         pdb.notify_disconnect(&random_peer2);
         pdb.disconnect_and_ban(&random_peer3);
         pdb.notify_disconnect(&random_peer3);
-        pdb.connect_ingoing(&random_peer, multiaddr.clone(), None);
+        pdb.connect_ingoing(&random_peer, multiaddr, None);
         assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
         assert_eq!(
             pdb.banned_peers_count.banned_peers(),
@@ -1021,10 +1022,11 @@ mod tests {
     }

     #[test]
+    #[allow(clippy::float_cmp)]
     fn test_trusted_peers_score() {
         let trusted_peer = PeerId::random();
         let log = build_log(slog::Level::Debug, false);
-        let mut pdb: PeerDB<M> = PeerDB::new(vec![trusted_peer.clone()], &log);
+        let mut pdb: PeerDB<M> = PeerDB::new(vec![trusted_peer], &log);

         pdb.connect_ingoing(&trusted_peer, "/ip4/0.0.0.0".parse().unwrap(), None);

```
```diff
@@ -346,6 +346,7 @@ mod tests {
     use super::*;

     #[test]
+    #[allow(clippy::float_cmp)]
     fn test_reputation_change() {
         let mut score = Score::default();

@@ -375,6 +376,7 @@ mod tests {
     }

     #[test]
+    #[allow(clippy::float_cmp)]
     fn test_ban_time() {
         let mut score = RealScore::default();
         let now = Instant::now();
@@ -402,6 +404,7 @@ mod tests {
     }

     #[test]
+    #[allow(clippy::float_cmp)]
     fn test_ignored_gossipsub_score() {
         let mut score = Score::default();
         score.update_gossipsub_score(GOSSIPSUB_GREYLIST_THRESHOLD, true);
```
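The `#[allow(clippy::float_cmp)]` attributes cover tests that compare floating-point scores with `assert_eq!`. The lint exists because exact float equality is usually a bug; allowing it per test is a judgment call rather than rewriting every assertion. A small sketch of what the lint guards against, assuming nothing about the real scoring code:

```rust
fn main() {
    // The classic trap clippy::float_cmp warns about: rounding error.
    let sum: f64 = 0.1 + 0.2;
    assert!(sum != 0.3);
    assert!((sum - 0.3).abs() < 1e-12); // an epsilon comparison works

    // But when a test checks a value that is defined exactly (e.g. a default
    // of 0.0), exact equality is intentional, hence the per-test allow.
    let default_score: f64 = 0.0;
    assert_eq!(default_score, 0.0);
}
```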
```diff
@@ -126,8 +126,7 @@ pub async fn build_libp2p_instance(

 #[allow(dead_code)]
 pub fn get_enr(node: &LibP2PService<E>) -> Enr {
-    let enr = node.swarm.local_enr().clone();
-    enr
+    node.swarm.local_enr()
 }

 // Returns `n` libp2p peers in fully connected topology.
```
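The `get_enr` change removes a binding that is returned immediately, which `clippy::let_and_return` flags. A minimal sketch of the lint (the function names here are made up):

```rust
// What the lint dislikes: a temporary that only exists to be returned.
fn local_value_old(x: u64) -> u64 {
    let value = x + 1;
    value
}

// The preferred form: return the expression directly.
fn local_value_new(x: u64) -> u64 {
    x + 1
}

fn main() {
    assert_eq!(local_value_old(41), local_value_new(41));
}
```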
```diff
@@ -17,6 +17,7 @@ type E = MinimalEthSpec;

 // Tests the STATUS RPC message
 #[test]
+#[allow(clippy::single_match)]
 fn test_status_rpc() {
     // set up the logging. The level and enabled logging or not
     let log_level = Level::Debug;
@@ -113,6 +114,7 @@ fn test_status_rpc() {

 // Tests a streamed BlocksByRange RPC Message
 #[test]
+#[allow(clippy::single_match)]
 fn test_blocks_by_range_chunked_rpc() {
     // set up the logging. The level and enabled logging or not
     let log_level = Level::Trace;
@@ -199,7 +201,7 @@ fn test_blocks_by_range_chunked_rpc() {
         warn!(log, "Receiver got request");
         for _ in 1..=messages_to_send {
             receiver.swarm.send_successful_response(
-                peer_id.clone(),
+                peer_id,
                 id,
                 rpc_response.clone(),
             );
```
```diff
@@ -340,8 +342,8 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() {
         messages_sent += 1;
         let (peer_id, stream_id) = message_info.as_ref().unwrap();
         receiver.swarm.send_successful_response(
-            peer_id.clone(),
-            stream_id.clone(),
+            *peer_id,
+            *stream_id,
             rpc_response.clone(),
         );
         debug!(log, "Sending message {}", messages_sent);
```
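Here and in the later RPC tests, `peer_id.clone()` / `stream_id.clone()` either become a move of the last use or a `*` dereference; both shapes silence clippy's redundant-clone family of lints. A sketch with a simple `Copy` type standing in for the ids (the real `PeerId` and stream-id types are not shown here):

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
struct StreamId(u32);

fn send(id: StreamId) -> StreamId {
    id
}

fn main() {
    let id = StreamId(7);
    let by_ref = &id;

    // clippy::clone_on_copy: cloning a `Copy` value through a reference...
    let a = send(by_ref.clone());
    // ...is spelled more honestly as a dereference (a plain copy).
    let b = send(*by_ref);

    assert_eq!(a, b);
}
```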
```diff
@@ -365,6 +367,7 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() {

 // Tests an empty response to a BlocksByRange RPC Message
 #[test]
+#[allow(clippy::single_match)]
 fn test_blocks_by_range_single_empty_rpc() {
     // set up the logging. The level and enabled logging or not
     let log_level = Level::Trace;
@@ -448,7 +451,7 @@ fn test_blocks_by_range_single_empty_rpc() {

         for _ in 1..=messages_to_send {
             receiver.swarm.send_successful_response(
-                peer_id.clone(),
+                peer_id,
                 id,
                 rpc_response.clone(),
             );
@@ -480,6 +483,7 @@ fn test_blocks_by_range_single_empty_rpc() {
 // which is greater than the Snappy frame size. Hence, this test
 // serves to test the snappy framing format as well.
 #[test]
+#[allow(clippy::single_match)]
 fn test_blocks_by_root_chunked_rpc() {
     // set up the logging. The level and enabled logging or not
     let log_level = Level::Debug;
@@ -565,7 +569,7 @@ fn test_blocks_by_root_chunked_rpc() {

         for _ in 1..=messages_to_send {
             receiver.swarm.send_successful_response(
-                peer_id.clone(),
+                peer_id,
                 id,
                 rpc_response.clone(),
             );
@@ -715,8 +719,8 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
         messages_sent += 1;
         let (peer_id, stream_id) = message_info.as_ref().unwrap();
         receiver.swarm.send_successful_response(
-            peer_id.clone(),
-            stream_id.clone(),
+            *peer_id,
+            *stream_id,
             rpc_response.clone(),
         );
         debug!(log, "Sending message {}", messages_sent);
@@ -740,6 +744,7 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() {

 // Tests a Goodbye RPC message
 #[test]
+#[allow(clippy::single_match)]
 fn test_goodbye_rpc() {
     // set up the logging. The level and enabled logging or not
     let log_level = Level::Trace;
```
```diff
@@ -20,6 +20,7 @@ use types::{Attestation, EthSpec, Slot, SubnetId, ValidatorSubscription};

 use crate::metrics;

+#[cfg(test)]
 mod tests;

 /// The minimum number of slots ahead that we attempt to discover peers for a subscription. If the
```
```diff
@@ -1,6 +1,4 @@
-#[cfg(test)]
-mod tests {
-    use super::super::*;
+use super::*;
 use beacon_chain::{
     builder::{BeaconChainBuilder, Witness},
     eth1_chain::CachingEth1Backend,
@@ -41,8 +39,7 @@ mod tests {

     let log = get_logger();
     let store =
-        HotColdDB::open_ephemeral(StoreConfig::default(), spec.clone(), log.clone())
-            .unwrap();
+        HotColdDB::open_ephemeral(StoreConfig::default(), spec.clone(), log.clone()).unwrap();

     let (shutdown_tx, _) = futures::channel::mpsc::channel(1);

```
```diff
@@ -345,12 +342,10 @@ mod tests {

     for event in &events {
         match event {
-            AttServiceMessage::DiscoverPeers(_) => {
-                discover_peer_count = discover_peer_count + 1
-            }
+            AttServiceMessage::DiscoverPeers(_) => discover_peer_count += 1,
             AttServiceMessage::Subscribe(_any_subnet) => {}
-            AttServiceMessage::EnrAdd(_any_subnet) => enr_add_count = enr_add_count + 1,
-            _ => unexpected_msg_count = unexpected_msg_count + 1,
+            AttServiceMessage::EnrAdd(_any_subnet) => enr_add_count += 1,
+            _ => unexpected_msg_count += 1,
         }
     }

@@ -405,12 +400,10 @@ mod tests {

     for event in &events {
         match event {
-            AttServiceMessage::DiscoverPeers(_) => {
-                discover_peer_count = discover_peer_count + 1
-            }
+            AttServiceMessage::DiscoverPeers(_) => discover_peer_count += 1,
             AttServiceMessage::Subscribe(_any_subnet) => {}
-            AttServiceMessage::EnrAdd(_any_subnet) => enr_add_count = enr_add_count + 1,
-            _ => unexpected_msg_count = unexpected_msg_count + 1,
+            AttServiceMessage::EnrAdd(_any_subnet) => enr_add_count += 1,
+            _ => unexpected_msg_count += 1,
         }
     }

@@ -429,4 +422,3 @@ mod tests {
     assert_eq!(enr_add_count, 64);
     assert_eq!(unexpected_msg_count, 0);
 }
-}
```
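The counter updates in these match arms are the `clippy::assign_op_pattern` fix: `x = x + 1` becomes `x += 1`, and the one-statement arm no longer needs braces. A trivial standalone sketch:

```rust
fn main() {
    let mut discover_peer_count = 0_u64;

    // clippy::assign_op_pattern: write `x = x + 1` ...
    discover_peer_count = discover_peer_count + 1;
    // ... as the compound assignment instead.
    discover_peer_count += 1;

    assert_eq!(discover_peer_count, 2);
}
```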
```diff
@@ -432,7 +432,7 @@ mod test {
         let state_b_root = Hash256::from_low_u64_be(slots_per_historical_root as u64 * 2);

         store.put_state(&state_a_root, &state_a).unwrap();
-        store.put_state(&state_b_root, &state_b.clone()).unwrap();
+        store.put_state(&state_b_root, &state_b).unwrap();

         let iter = StateRootsIterator::new(store, &state_b);

```
```diff
@@ -202,36 +202,24 @@ mod test {

     #[test]
     fn test_strip_off() {
-        let expected = "hello world".as_bytes().to_vec();
+        let expected = b"hello world".to_vec();

+        assert_eq!(strip_off_newlines(b"hello world\n".to_vec()), expected);
         assert_eq!(
-            strip_off_newlines("hello world\n".as_bytes().to_vec()),
+            strip_off_newlines(b"hello world\n\n\n\n".to_vec()),
             expected
         );
+        assert_eq!(strip_off_newlines(b"hello world\r".to_vec()), expected);
         assert_eq!(
-            strip_off_newlines("hello world\n\n\n\n".as_bytes().to_vec()),
+            strip_off_newlines(b"hello world\r\r\r\r\r".to_vec()),
             expected
         );
+        assert_eq!(strip_off_newlines(b"hello world\r\n".to_vec()), expected);
         assert_eq!(
-            strip_off_newlines("hello world\r".as_bytes().to_vec()),
-            expected
-        );
-        assert_eq!(
-            strip_off_newlines("hello world\r\r\r\r\r".as_bytes().to_vec()),
-            expected
-        );
-        assert_eq!(
-            strip_off_newlines("hello world\r\n".as_bytes().to_vec()),
-            expected
-        );
-        assert_eq!(
-            strip_off_newlines("hello world\r\n\r\n".as_bytes().to_vec()),
-            expected
-        );
-        assert_eq!(
-            strip_off_newlines("hello world".as_bytes().to_vec()),
+            strip_off_newlines(b"hello world\r\n\r\n".to_vec()),
             expected
         );
+        assert_eq!(strip_off_newlines(b"hello world".to_vec()), expected);
     }

     #[test]
```
```diff
@@ -247,8 +247,8 @@ mod tests {
     #[test]
     fn hard_coded_nets_work() {
         for net in HARDCODED_NETS {
-            let config =
-                Eth2NetworkConfig::from_hardcoded_net(net).expect(&format!("{:?}", net.name));
+            let config = Eth2NetworkConfig::from_hardcoded_net(net)
+                .unwrap_or_else(|_| panic!("{:?}", net.name));

             if net.name == "mainnet" || net.name == "toledo" || net.name == "pyrmont" {
                 // Ensure we can parse the YAML config to a chain spec.
```
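`expect(&format!(...))` builds the panic message eagerly even when the `Result` is `Ok`, which is what `clippy::expect_fun_call` complains about; `unwrap_or_else(|_| panic!(...))` defers the formatting to the error path. A minimal sketch (the `net_name` value is illustrative):

```rust
fn main() {
    let net_name = "mainnet";
    let value: Result<u8, ()> = Ok(1);

    // clippy::expect_fun_call: the format string is evaluated on every call.
    let a = value.expect(&format!("{:?}", net_name));

    // Preferred: only build the message if we actually panic.
    let b = value.unwrap_or_else(|_| panic!("{:?}", net_name));

    assert_eq!(a, b);
}
```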
```diff
@@ -110,7 +110,7 @@ mod test {

         let _lockfile = File::create(&path).unwrap();

-        let lock = Lockfile::new(path.clone()).unwrap();
+        let lock = Lockfile::new(path).unwrap();
         assert!(lock.file_existed());
     }

```
```diff
@@ -78,7 +78,6 @@ pub fn int_to_bytes96(int: u64) -> Vec<u8> {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use hex;
     use std::{fs::File, io::prelude::*, path::PathBuf};
     use yaml_rust::yaml;

```
```diff
@@ -386,7 +386,7 @@ mod test_compute_deltas {
             state_root,
             target_root: finalized_root,
             current_epoch_shuffling_id: junk_shuffling_id.clone(),
-            next_epoch_shuffling_id: junk_shuffling_id.clone(),
+            next_epoch_shuffling_id: junk_shuffling_id,
             justified_epoch: genesis_epoch,
             finalized_epoch: genesis_epoch,
         })
```
```diff
@@ -83,6 +83,7 @@ mod round_trip {
     }

     #[test]
+    #[allow(clippy::zero_prefixed_literal)]
     fn fixed_len_struct_encoding() {
         let items: Vec<FixedLen> = vec![
             FixedLen { a: 0, b: 0, c: 0 },
@@ -142,6 +143,7 @@ mod round_trip {
     }

     #[test]
+    #[allow(clippy::zero_prefixed_literal)]
     fn offset_into_fixed_bytes() {
         let bytes = vec![
             //  1   2   3   4   5   6   7   8   9   10  11  12  13  14  15
@@ -172,6 +174,7 @@ mod round_trip {
     }

     #[test]
+    #[allow(clippy::zero_prefixed_literal)]
     fn first_offset_skips_byte() {
         let bytes = vec![
             //  1   2   3   4   5   6   7   8   9   10  11  12  13  14  15
@@ -186,6 +189,7 @@ mod round_trip {
     }

     #[test]
+    #[allow(clippy::zero_prefixed_literal)]
     fn variable_len_struct_encoding() {
         let items: Vec<VariableLen> = vec![
             VariableLen {
@@ -274,6 +278,7 @@ mod round_trip {
     }

     #[test]
+    #[allow(clippy::zero_prefixed_literal)]
     fn offsets_decreasing() {
         let bytes = vec![
             //  1   2   3   4   5   6   7   8   9   10  11  12  13  14  15
@@ -296,6 +301,7 @@ mod round_trip {
     }

     #[test]
+    #[allow(clippy::zero_prefixed_literal)]
     fn two_variable_len_options_encoding() {
         let s = TwoVariableLenOptions {
             a: 42,
```
```diff
@@ -777,17 +777,17 @@ mod bitlist {
     fn ssz_encode() {
         assert_eq!(
             BitList0::with_capacity(0).unwrap().as_ssz_bytes(),
-            vec![0b0000_00001],
+            vec![0b0000_0001],
         );

         assert_eq!(
             BitList1::with_capacity(0).unwrap().as_ssz_bytes(),
-            vec![0b0000_00001],
+            vec![0b0000_0001],
         );

         assert_eq!(
             BitList1::with_capacity(1).unwrap().as_ssz_bytes(),
-            vec![0b0000_00010],
+            vec![0b0000_0010],
         );

         assert_eq!(
```
```diff
@@ -387,7 +387,7 @@ fn invalid_attestation_wrong_justified_checkpoint() {
             index: 0,
             reason: AttestationInvalid::WrongJustifiedCheckpoint {
                 state: Checkpoint {
-                    epoch: Epoch::from(2 as u64),
+                    epoch: Epoch::from(2_u64),
                     root: Hash256::zero(),
                 },
                 attestation: Checkpoint {
@@ -878,7 +878,7 @@ fn invalid_proposer_slashing_proposal_epoch_mismatch() {
             index: 0,
             reason: ProposerSlashingInvalid::ProposalSlotMismatch(
                 Slot::from(0_u64),
-                Slot::from(128 as u64)
+                Slot::from(128_u64)
             )
         })
     );
```
```diff
@@ -181,6 +181,7 @@ mod tests {
     }

     #[test]
+    #[allow(clippy::assertions_on_constants)]
     fn sanity_check_constants() {
         assert!(TOTAL_SIZE > SEED_SIZE);
         assert!(TOTAL_SIZE > PIVOT_VIEW_SIZE);
```
```diff
@@ -397,7 +397,7 @@ mod test {

         let merklizer_root_individual_3_bytes = {
             let mut m = MerkleHasher::with_depth(depth);
-            for bytes in reference_bytes.clone().chunks(3) {
+            for bytes in reference_bytes.chunks(3) {
                 m.write(bytes).expect("should process byte");
             }
             m.finish().expect("should finish")
@@ -426,7 +426,7 @@ mod test {
     /// of leaves and a depth.
     fn compare_reference_with_len(leaves: u64, depth: usize) {
         let leaves = (0..leaves)
-            .map(|i| Hash256::from_low_u64_be(i))
+            .map(Hash256::from_low_u64_be)
             .collect::<Vec<_>>();
         compare_with_reference(&leaves, depth)
     }
@@ -435,7 +435,7 @@ mod test {
     /// results.
     fn compare_new_with_leaf_count(num_leaves: u64, depth: usize) {
         let leaves = (0..num_leaves)
-            .map(|i| Hash256::from_low_u64_be(i))
+            .map(Hash256::from_low_u64_be)
             .collect::<Vec<_>>();

         let from_depth = {
```
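Dropping the `.clone()` before `.chunks(3)` and passing `Hash256::from_low_u64_be` directly instead of wrapping it in a closure are the fixes for `clippy::redundant_clone` and `clippy::redundant_closure`. A sketch of the closure case with a plain function:

```rust
fn double(x: u64) -> u64 {
    x * 2
}

fn main() {
    // clippy::redundant_closure: the closure only forwards its argument...
    let a: Vec<u64> = (0..4).map(|i| double(i)).collect();
    // ...so the function item can be passed to `map` directly.
    let b: Vec<u64> = (0..4).map(double).collect();

    assert_eq!(a, b);
}
```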
```diff
@@ -1,6 +1,7 @@
 #![cfg(test)]
 use super::*;
 use crate::test_utils::*;
+use std::ops::Mul;

 ssz_and_tree_hash_tests!(FoundationBeaconState);

@@ -49,13 +50,13 @@ fn test_beacon_proposer_index<T: EthSpec>() {

     // Test where we have two validators per slot.
     // 0th candidate should be chosen every time.
-    let state = build_state(T::slots_per_epoch() as usize * 2);
+    let state = build_state((T::slots_per_epoch() as usize).mul(2));
     for i in 0..T::slots_per_epoch() {
         test(&state, Slot::from(i), 0);
     }

     // Test with two validators per slot, first validator has zero balance.
-    let mut state = build_state(T::slots_per_epoch() as usize * 2);
+    let mut state = build_state((T::slots_per_epoch() as usize).mul(2));
     let slot0_candidate0 = ith_candidate(&state, Slot::new(0), 0, &spec);
     state.validators[slot0_candidate0].effective_balance = 0;
     test(&state, Slot::new(0), 1);
@@ -74,8 +75,8 @@ fn beacon_proposer_index() {
 /// 1. Using the cache before it's built fails.
 /// 2. Using the cache after it's build passes.
 /// 3. Using the cache after it's dropped fails.
-fn test_cache_initialization<'a, T: EthSpec>(
-    state: &'a mut BeaconState<T>,
+fn test_cache_initialization<T: EthSpec>(
+    state: &mut BeaconState<T>,
     relative_epoch: RelativeEpoch,
     spec: &ChainSpec,
 ) {
@@ -126,7 +127,7 @@ fn cache_initialization() {
 }

 fn test_clone_config<E: EthSpec>(base_state: &BeaconState<E>, clone_config: CloneConfig) {
-    let state = base_state.clone_with(clone_config.clone());
+    let state = base_state.clone_with(clone_config);
     if clone_config.committee_caches {
         state
             .committee_cache(RelativeEpoch::Previous)
@@ -260,6 +261,7 @@ fn tree_hash_cache() {
 mod committees {
     use super::*;
     use crate::beacon_state::MinimalEthSpec;
+    use std::ops::{Add, Div};
     use swap_or_not_shuffle::shuffle_list;

     fn execute_committee_consistency_test<T: EthSpec>(
@@ -295,7 +297,10 @@ mod committees {
         // of committees in an epoch.
         assert_eq!(
             beacon_committees.len() as u64,
-            state.get_epoch_committee_count(relative_epoch).unwrap() / T::slots_per_epoch()
+            state
+                .get_epoch_committee_count(relative_epoch)
+                .unwrap()
+                .div(T::slots_per_epoch())
         );

         for (committee_index, bc) in beacon_committees.iter().enumerate() {
@@ -372,10 +377,11 @@ mod committees {
     fn committee_consistency_test_suite<T: EthSpec>(cached_epoch: RelativeEpoch) {
         let spec = T::default_spec();

-        let validator_count = spec.max_committees_per_slot
-            * T::slots_per_epoch() as usize
-            * spec.target_committee_size
-            + 1;
+        let validator_count = spec
+            .max_committees_per_slot
+            .mul(T::slots_per_epoch() as usize)
+            .mul(spec.target_committee_size)
+            .add(1);

         committee_consistency_test::<T>(validator_count as usize, Epoch::new(0), cached_epoch);

@@ -387,7 +393,10 @@ mod committees {

         committee_consistency_test::<T>(
             validator_count as usize,
-            T::genesis_epoch() + T::slots_per_historical_root() as u64 * T::slots_per_epoch() * 4,
+            T::genesis_epoch()
+                + (T::slots_per_historical_root() as u64)
+                    .mul(T::slots_per_epoch())
+                    .mul(4),
             cached_epoch,
         );
     }
```
```diff
@@ -405,6 +405,7 @@ mod tests {
         let _ = ChainSpec::mainnet();
     }

+    #[allow(clippy::useless_vec)]
     fn test_domain(domain_type: Domain, raw_domain: u32, spec: &ChainSpec) {
         let previous_version = [0, 0, 0, 1];
         let current_version = [0, 0, 0, 2];
```
```diff
@@ -137,9 +137,11 @@ mod test {

     #[test]
     fn zeroed_validator() {
-        let mut v = Validator::default();
-        v.activation_eligibility_epoch = Epoch::from(0u64);
-        v.activation_epoch = Epoch::from(0u64);
+        let v = Validator {
+            activation_eligibility_epoch: Epoch::from(0u64),
+            activation_epoch: Epoch::from(0u64),
+            ..Default::default()
+        };
         test_validator_tree_hash(&v);
     }

@@ -153,6 +155,7 @@ mod test {
     }

     #[test]
+    #[allow(clippy::assertions_on_constants)]
     pub fn smallvec_size_check() {
         // If this test fails we need to go and reassess the length of the `SmallVec` in
         // `cached_tree_hash::TreeHashCache`. If the size of the `SmallVec` is too slow we're going
```
```diff
@@ -135,7 +135,7 @@ pub mod tests_commons {
     pub fn assert_backend_new_error(matches: &ArgMatches, error_msg: &str) {
         match Backend::new(matches, &get_null_logger()) {
             Ok(_) => panic!("This invocation to Backend::new() should return error"),
-            Err(e) => assert_eq!(e.to_string(), error_msg),
+            Err(e) => assert_eq!(e, error_msg),
         }
     }
 }
@@ -188,7 +188,7 @@ pub mod backend_new {

         match result {
             Ok(_) => panic!("This invocation to Backend::new() should return error"),
-            Err(e) => assert_eq!(e.to_string(), "Storage Raw Dir: PermissionDenied",),
+            Err(e) => assert_eq!(e, "Storage Raw Dir: PermissionDenied",),
         }
     }

```
```diff
@@ -107,7 +107,7 @@ fn surrounds_existing_single_val_single_chunk() {
 fn surrounds_existing_multi_vals_single_chunk() {
     let validators = vec![0, 16, 1024, 300_000, 300_001];
     let att1 = indexed_att(validators.clone(), 1, 2, 0);
-    let att2 = indexed_att(validators.clone(), 0, 3, 0);
+    let att2 = indexed_att(validators, 0, 3, 0);
     let slashings = hashset![att_slashing(&att2, &att1)];
     slasher_test_indiv(&[att1, att2], &slashings, 3);
 }
```
```diff
@@ -9,7 +9,7 @@ use types::{Epoch, EthSpec};
 fn empty_pruning() {
     let tempdir = tempdir().unwrap();
     let config = Config::new(tempdir.path().into());
-    let slasher = Slasher::<E>::open(config.clone(), logger()).unwrap();
+    let slasher = Slasher::<E>::open(config, logger()).unwrap();
     slasher.prune_database(Epoch::new(0)).unwrap();
 }

```
```diff
@@ -62,7 +62,7 @@ fn random_test(seed: u64, test_config: TestConfig) {
             .choose_multiple(&mut rng, num_attesters)
             .copied()
             .collect::<Vec<u64>>();
-        attesting_indices.sort();
+        attesting_indices.sort_unstable();

         // If checking slashings, generate valid attestations in range.
         let (source, target) = if check_slashings {
```
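`sort()` on a `Vec<u64>` becomes `sort_unstable()`, per `clippy::stable_sort_primitive`: for primitive elements a stable sort buys nothing, since equal values are indistinguishable, and the unstable sort avoids the extra allocation. A trivial sketch:

```rust
fn main() {
    let mut attesting_indices = vec![5_u64, 1, 3, 3, 2];

    // Equal `u64`s cannot be told apart, so sort stability is irrelevant here.
    attesting_indices.sort_unstable();

    assert_eq!(attesting_indices, vec![1, 2, 3, 3, 5]);
}
```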
```diff
@@ -30,7 +30,7 @@ fn attestation_pruning_empty_wrap_around() {

     // Add an attestation that would be surrounded with the modulo considered
     slasher.accept_attestation(indexed_att(
-        v.clone(),
+        v,
         2 * history_length - 3,
         2 * history_length - 2,
         1,
@@ -48,7 +48,7 @@ fn pruning_with_map_full() {
     config.history_length = 1024;
     config.max_db_size_mbs = 1;

-    let slasher = Slasher::open(config.clone(), logger()).unwrap();
+    let slasher = Slasher::open(config, logger()).unwrap();

     let v = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];

```
```diff
@@ -306,7 +306,7 @@ fn invalid_surrounding_from_first_source() {
     let second = attestation_data_builder(3, 4);
     StreamTest {
         cases: vec![
-            Test::single(first.clone()),
+            Test::single(first),
             Test::single(second.clone()),
             Test::single(attestation_data_builder(2, 5)).expect_invalid_att(
                 InvalidAttestation::NewSurroundsPrev {
@@ -326,8 +326,8 @@ fn invalid_surrounding_multiple_votes() {
     let third = attestation_data_builder(2, 3);
     StreamTest {
         cases: vec![
-            Test::single(first.clone()),
-            Test::single(second.clone()),
+            Test::single(first),
+            Test::single(second),
             Test::single(third.clone()),
             Test::single(attestation_data_builder(0, 4)).expect_invalid_att(
                 InvalidAttestation::NewSurroundsPrev {
```
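These `.clone()` removals in the slashing-protection tests are all the same shape: the value is cloned on its last use, so it can simply be moved, which is what `clippy::redundant_clone` points out. A one-line sketch:

```rust
fn main() {
    let first = String::from("attestation data");

    // Last use of `first`: move it instead of cloning it.
    let case = first; // previously `first.clone()`

    assert_eq!(case, "attestation data");
}
```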
```diff
@@ -69,7 +69,7 @@ fn valid_same_block_different_validator() {
         registered_validators: vec![pubkey(0), pubkey(1)],
         cases: vec![
             Test::with_pubkey(pubkey(0), block.clone()),
-            Test::with_pubkey(pubkey(1), block.clone()),
+            Test::with_pubkey(pubkey(1), block),
         ],
     }
     .run()
```
```diff
@@ -119,6 +119,7 @@ mod test {
     use super::*;

     #[test]
+    #[allow(clippy::eq_op)]
     fn signing_root_partial_eq() {
         let h0 = SigningRoot(Hash256::zero());
         let h1 = SigningRoot(Hash256::repeat_byte(1));
```
```diff
@@ -88,7 +88,7 @@ impl<E: EthSpec> ForkServiceBuilder<slot_clock::TestingSlotClock, E> {
             eth2::Url::parse("http://127.0.0.1").unwrap(),
         ))];
         let mut beacon_nodes = BeaconNodeFallback::new(candidates, spec, log.clone());
-        beacon_nodes.set_slot_clock(slot_clock.clone());
+        beacon_nodes.set_slot_clock(slot_clock);

         Self {
             fork: Some(types::Fork::default()),
```