Fix clippy's performance lints (#1286)

* Fix clippy perf lints

* Cargo fmt

* Add `-D clippy::perf` and `-D clippy::correctness` to lint rule in Makefile

* Fix some leftover clippy lints
This commit is contained in:
pscott 2020-06-25 16:04:08 +02:00 committed by GitHub
parent b3c01bf09d
commit 02174e21d8
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
26 changed files with 82 additions and 84 deletions

1
Cargo.lock generated
View File

@ -336,7 +336,6 @@ dependencies = [
"slog-term", "slog-term",
"store", "store",
"tokio 0.2.21", "tokio 0.2.21",
"toml",
"types", "types",
"version", "version",
] ]

View File

@ -51,9 +51,9 @@ test: test-release
test-full: cargo-fmt test-release test-debug test-ef test-full: cargo-fmt test-release test-debug test-ef
# Lints the code for bad style and potentially unsafe arithmetic using Clippy. # Lints the code for bad style and potentially unsafe arithmetic using Clippy.
# Clippy lints are opt-in per-crate for now, which is why we allow all by default. # Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints.
lint: lint:
cargo clippy --all -- -A clippy::all cargo clippy --all -- -A clippy::all --D clippy::perf --D clippy::correctness
# Runs the makefile in the `ef_tests` repo. # Runs the makefile in the `ef_tests` repo.
# #

View File

@ -197,7 +197,7 @@ impl<T: Item, E: EthSpec> AutoPruningContainer<T, E> {
.map(|(_epoch, item)| item.len()) .map(|(_epoch, item)| item.len())
.fold((0, 0), |(count, sum), len| (count + 1, sum + len)); .fold((0, 0), |(count, sum), len| (count + 1, sum + len));
let initial_capacity = sum.checked_div(count).unwrap_or(T::default_capacity()); let initial_capacity = sum.checked_div(count).unwrap_or_else(T::default_capacity);
let mut item = T::with_capacity(initial_capacity); let mut item = T::with_capacity(initial_capacity);
item.insert(validator_index); item.insert(validator_index);

View File

@ -444,7 +444,7 @@ where
}; };
let sk = &self.keypairs[proposer_index].sk; let sk = &self.keypairs[proposer_index].sk;
let fork = &state.fork.clone(); let fork = &state.fork;
let randao_reveal = { let randao_reveal = {
let epoch = slot.epoch(E::slots_per_epoch()); let epoch = slot.epoch(E::slots_per_epoch());
@ -605,7 +605,7 @@ where
selection_proof.is_aggregator(bc.committee.len(), spec).unwrap_or(false) selection_proof.is_aggregator(bc.committee.len(), spec).unwrap_or(false)
}) })
.copied() .copied()
.expect(&format!( .unwrap_or_else(|| panic!(
"Committee {} at slot {} with {} attesting validators does not have any aggregators", "Committee {} at slot {} with {} attesting validators does not have any aggregators",
bc.index, state.slot, bc.committee.len() bc.index, state.slot, bc.committee.len()
)); ));

View File

@ -70,7 +70,7 @@ pub enum DelegateIn<TSpec: EthSpec> {
pub enum DelegateOut<TSpec: EthSpec> { pub enum DelegateOut<TSpec: EthSpec> {
Gossipsub(<GossipHandler as ProtocolsHandler>::OutEvent), Gossipsub(<GossipHandler as ProtocolsHandler>::OutEvent),
RPC(<RPCHandler<TSpec> as ProtocolsHandler>::OutEvent), RPC(<RPCHandler<TSpec> as ProtocolsHandler>::OutEvent),
Identify(<IdentifyHandler as ProtocolsHandler>::OutEvent), Identify(Box<<IdentifyHandler as ProtocolsHandler>::OutEvent>),
} }
/// Wrapper around the `ProtocolsHandler::Error` types of the handlers. /// Wrapper around the `ProtocolsHandler::Error` types of the handlers.
@ -342,7 +342,9 @@ impl<TSpec: EthSpec> ProtocolsHandler for DelegatingHandler<TSpec> {
match self.identify_handler.poll(cx) { match self.identify_handler.poll(cx) {
Poll::Ready(ProtocolsHandlerEvent::Custom(event)) => { Poll::Ready(ProtocolsHandlerEvent::Custom(event)) => {
return Poll::Ready(ProtocolsHandlerEvent::Custom(DelegateOut::Identify(event))); return Poll::Ready(ProtocolsHandlerEvent::Custom(DelegateOut::Identify(
Box::new(event),
)));
} }
Poll::Ready(ProtocolsHandlerEvent::Close(event)) => { Poll::Ready(ProtocolsHandlerEvent::Close(event)) => {
return Poll::Ready(ProtocolsHandlerEvent::Close(DelegateError::Identify(event))); return Poll::Ready(ProtocolsHandlerEvent::Close(DelegateError::Identify(event)));

View File

@ -42,7 +42,7 @@ pub enum BehaviourHandlerIn<TSpec: EthSpec> {
} }
pub enum BehaviourHandlerOut<TSpec: EthSpec> { pub enum BehaviourHandlerOut<TSpec: EthSpec> {
Delegate(DelegateOut<TSpec>), Delegate(Box<DelegateOut<TSpec>>),
// TODO: replace custom with events to send // TODO: replace custom with events to send
Custom, Custom,
} }
@ -119,7 +119,7 @@ impl<TSpec: EthSpec> ProtocolsHandler for BehaviourHandler<TSpec> {
match self.delegate.poll(cx) { match self.delegate.poll(cx) {
Poll::Ready(ProtocolsHandlerEvent::Custom(event)) => { Poll::Ready(ProtocolsHandlerEvent::Custom(event)) => {
return Poll::Ready(ProtocolsHandlerEvent::Custom( return Poll::Ready(ProtocolsHandlerEvent::Custom(
BehaviourHandlerOut::Delegate(event), BehaviourHandlerOut::Delegate(Box::new(event)),
)) ))
} }
Poll::Ready(ProtocolsHandlerEvent::Close(err)) => { Poll::Ready(ProtocolsHandlerEvent::Close(err)) => {

View File

@ -160,10 +160,10 @@ impl<TSpec: EthSpec> NetworkBehaviour for Behaviour<TSpec> {
) { ) {
match event { match event {
// Events coming from the handler, redirected to each behaviour // Events coming from the handler, redirected to each behaviour
BehaviourHandlerOut::Delegate(delegate) => match delegate { BehaviourHandlerOut::Delegate(delegate) => match *delegate {
DelegateOut::Gossipsub(ev) => self.gossipsub.inject_event(peer_id, conn_id, ev), DelegateOut::Gossipsub(ev) => self.gossipsub.inject_event(peer_id, conn_id, ev),
DelegateOut::RPC(ev) => self.eth2_rpc.inject_event(peer_id, conn_id, ev), DelegateOut::RPC(ev) => self.eth2_rpc.inject_event(peer_id, conn_id, ev),
DelegateOut::Identify(ev) => self.identify.inject_event(peer_id, conn_id, ev), DelegateOut::Identify(ev) => self.identify.inject_event(peer_id, conn_id, *ev),
}, },
/* Custom events sent BY the handler */ /* Custom events sent BY the handler */
BehaviourHandlerOut::Custom => { BehaviourHandlerOut::Custom => {

View File

@ -51,7 +51,7 @@ const FIND_NODE_QUERY_CLOSEST_PEERS: usize = 16;
pub enum DiscoveryEvent { pub enum DiscoveryEvent {
/// A query has completed. The first parameter is the `min_ttl` of the peers if it is specified /// A query has completed. The first parameter is the `min_ttl` of the peers if it is specified
/// and the second parameter are the discovered peers. /// and the second parameter are the discovered peers.
QueryResult(Option<Instant>, Box<Vec<Enr>>), QueryResult(Option<Instant>, Vec<Enr>),
/// This indicates that our local UDP socketaddr has been updated and we should inform libp2p. /// This indicates that our local UDP socketaddr has been updated and we should inform libp2p.
SocketUpdated(SocketAddr), SocketUpdated(SocketAddr),
} }
@ -328,7 +328,7 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
.enr_insert(BITFIELD_ENR_KEY, current_bitfield.as_ssz_bytes()); .enr_insert(BITFIELD_ENR_KEY, current_bitfield.as_ssz_bytes());
// replace the global version // replace the global version
*self.network_globals.local_enr.write() = self.discv5.local_enr().clone(); *self.network_globals.local_enr.write() = self.discv5.local_enr();
Ok(()) Ok(())
} }
@ -360,7 +360,7 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
}); });
// replace the global version with discovery version // replace the global version with discovery version
*self.network_globals.local_enr.write() = self.discv5.local_enr().clone(); *self.network_globals.local_enr.write() = self.discv5.local_enr();
} }
/* Internal Functions */ /* Internal Functions */
@ -602,7 +602,7 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
self.cached_enrs.put(enr.peer_id(), enr); self.cached_enrs.put(enr.peer_id(), enr);
} }
// return the result to the peer manager // return the result to the peer manager
return Poll::Ready(DiscoveryEvent::QueryResult(min_ttl, Box::new(result))); return Poll::Ready(DiscoveryEvent::QueryResult(min_ttl, result));
} }
// Process the server event stream // Process the server event stream

View File

@ -422,7 +422,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
/// with a new `PeerId` which involves a discovery routing table lookup. We could dial the /// with a new `PeerId` which involves a discovery routing table lookup. We could dial the
/// multiaddr here, however this could relate to duplicate PeerId's etc. If the lookup /// multiaddr here, however this could relate to duplicate PeerId's etc. If the lookup
/// proves resource constraining, we should switch to multiaddr dialling here. /// proves resource constraining, we should switch to multiaddr dialling here.
fn peers_discovered(&mut self, peers: Vec<Enr>, min_ttl: Option<Instant>) { fn peers_discovered(&mut self, peers: &[Enr], min_ttl: Option<Instant>) {
for enr in peers { for enr in peers {
let peer_id = enr.peer_id(); let peer_id = enr.peer_id();
@ -623,7 +623,7 @@ impl<TSpec: EthSpec> Stream for PeerManager<TSpec> {
match event { match event {
DiscoveryEvent::SocketUpdated(socket_addr) => self.socket_updated(socket_addr), DiscoveryEvent::SocketUpdated(socket_addr) => self.socket_updated(socket_addr),
DiscoveryEvent::QueryResult(min_ttl, peers) => { DiscoveryEvent::QueryResult(min_ttl, peers) => {
self.peers_discovered(*peers, min_ttl) self.peers_discovered(&peers, min_ttl)
} }
} }
} }

View File

@ -169,7 +169,7 @@ where
/// A response has been sent, pending writing. /// A response has been sent, pending writing.
ResponsePendingSend { ResponsePendingSend {
/// The substream used to send the response /// The substream used to send the response
substream: InboundFramed<NegotiatedSubstream, TSpec>, substream: Box<InboundFramed<NegotiatedSubstream, TSpec>>,
/// The message that is attempting to be sent. /// The message that is attempting to be sent.
message: RPCCodedResponse<TSpec>, message: RPCCodedResponse<TSpec>,
/// Whether a stream termination is requested. If true the stream will be closed after /// Whether a stream termination is requested. If true the stream will be closed after
@ -180,7 +180,7 @@ where
/// A response has been sent, pending flush. /// A response has been sent, pending flush.
ResponsePendingFlush { ResponsePendingFlush {
/// The substream used to send the response /// The substream used to send the response
substream: InboundFramed<NegotiatedSubstream, TSpec>, substream: Box<InboundFramed<NegotiatedSubstream, TSpec>>,
/// Whether a stream termination is requested. If true the stream will be closed after /// Whether a stream termination is requested. If true the stream will be closed after
/// this send. Otherwise it will transition to an idle state until a stream termination is /// this send. Otherwise it will transition to an idle state until a stream termination is
/// requested or a timeout is reached. /// requested or a timeout is reached.
@ -188,9 +188,9 @@ where
}, },
/// The response stream is idle and awaiting input from the application to send more chunked /// The response stream is idle and awaiting input from the application to send more chunked
/// responses. /// responses.
ResponseIdle(InboundFramed<NegotiatedSubstream, TSpec>), ResponseIdle(Box<InboundFramed<NegotiatedSubstream, TSpec>>),
/// The substream is attempting to shutdown. /// The substream is attempting to shutdown.
Closing(InboundFramed<NegotiatedSubstream, TSpec>), Closing(Box<InboundFramed<NegotiatedSubstream, TSpec>>),
/// Temporary state during processing /// Temporary state during processing
Poisoned, Poisoned,
} }
@ -201,12 +201,12 @@ pub enum OutboundSubstreamState<TSpec: EthSpec> {
/// handler because GOODBYE requests can be handled and responses dropped instantly. /// handler because GOODBYE requests can be handled and responses dropped instantly.
RequestPendingResponse { RequestPendingResponse {
/// The framed negotiated substream. /// The framed negotiated substream.
substream: OutboundFramed<NegotiatedSubstream, TSpec>, substream: Box<OutboundFramed<NegotiatedSubstream, TSpec>>,
/// Keeps track of the actual request sent. /// Keeps track of the actual request sent.
request: RPCRequest<TSpec>, request: RPCRequest<TSpec>,
}, },
/// Closing an outbound substream> /// Closing an outbound substream>
Closing(OutboundFramed<NegotiatedSubstream, TSpec>), Closing(Box<OutboundFramed<NegotiatedSubstream, TSpec>>),
/// Temporary state during processing /// Temporary state during processing
Poisoned, Poisoned,
} }
@ -551,7 +551,7 @@ where
self.current_inbound_substream_id, self.current_inbound_substream_id,
Duration::from_secs(RESPONSE_TIMEOUT), Duration::from_secs(RESPONSE_TIMEOUT),
); );
let awaiting_stream = InboundSubstreamState::ResponseIdle(substream); let awaiting_stream = InboundSubstreamState::ResponseIdle(Box::new(substream));
self.inbound_substreams.insert( self.inbound_substreams.insert(
self.current_inbound_substream_id, self.current_inbound_substream_id,
(awaiting_stream, Some(delay_key), req.protocol()), (awaiting_stream, Some(delay_key), req.protocol()),
@ -593,7 +593,7 @@ where
Duration::from_secs(RESPONSE_TIMEOUT), Duration::from_secs(RESPONSE_TIMEOUT),
); );
let awaiting_stream = OutboundSubstreamState::RequestPendingResponse { let awaiting_stream = OutboundSubstreamState::RequestPendingResponse {
substream: out, substream: Box::new(out),
request, request,
}; };
let expected_responses = if expected_responses > 1 { let expected_responses = if expected_responses > 1 {
@ -833,7 +833,7 @@ where
// await flush // await flush
entry.get_mut().0 = entry.get_mut().0 =
InboundSubstreamState::ResponsePendingFlush { InboundSubstreamState::ResponsePendingFlush {
substream, substream: substream,
closing, closing,
}; };
drive_stream_further = true; drive_stream_further = true;
@ -853,7 +853,7 @@ where
} else { } else {
// check for queued chunks and update the stream // check for queued chunks and update the stream
entry.get_mut().0 = apply_queued_responses( entry.get_mut().0 = apply_queued_responses(
substream, *substream,
&mut self &mut self
.queued_outbound_items .queued_outbound_items
.get_mut(&request_id), .get_mut(&request_id),
@ -908,7 +908,7 @@ where
} else { } else {
// check for queued chunks and update the stream // check for queued chunks and update the stream
entry.get_mut().0 = apply_queued_responses( entry.get_mut().0 = apply_queued_responses(
substream, *substream,
&mut self &mut self
.queued_outbound_items .queued_outbound_items
.get_mut(&request_id), .get_mut(&request_id),
@ -942,7 +942,7 @@ where
InboundSubstreamState::ResponseIdle(substream) => { InboundSubstreamState::ResponseIdle(substream) => {
if !deactivated { if !deactivated {
entry.get_mut().0 = apply_queued_responses( entry.get_mut().0 = apply_queued_responses(
substream, *substream,
&mut self.queued_outbound_items.get_mut(&request_id), &mut self.queued_outbound_items.get_mut(&request_id),
&mut drive_stream_further, &mut drive_stream_further,
); );
@ -1190,10 +1190,10 @@ fn apply_queued_responses<TSpec: EthSpec>(
match queue.remove(0) { match queue.remove(0) {
RPCCodedResponse::StreamTermination(_) => { RPCCodedResponse::StreamTermination(_) => {
// close the stream if this is a stream termination // close the stream if this is a stream termination
InboundSubstreamState::Closing(substream) InboundSubstreamState::Closing(Box::new(substream))
} }
chunk => InboundSubstreamState::ResponsePendingSend { chunk => InboundSubstreamState::ResponsePendingSend {
substream, substream: Box::new(substream),
message: chunk, message: chunk,
closing: false, closing: false,
}, },
@ -1201,7 +1201,7 @@ fn apply_queued_responses<TSpec: EthSpec>(
} }
_ => { _ => {
// no items queued set to idle // no items queued set to idle
InboundSubstreamState::ResponseIdle(substream) InboundSubstreamState::ResponseIdle(Box::new(substream))
} }
} }
} }

View File

@ -237,7 +237,7 @@ impl<T: BeaconChainTypes> AttestationService<T> {
&mut self, &mut self,
_message_id: &MessageId, _message_id: &MessageId,
peer_id: &PeerId, peer_id: &PeerId,
subnet: &SubnetId, subnet: SubnetId,
attestation: &Attestation<T::EthSpec>, attestation: &Attestation<T::EthSpec>,
) -> bool { ) -> bool {
// verify the attestation is on the correct subnet // verify the attestation is on the correct subnet
@ -249,7 +249,7 @@ impl<T: BeaconChainTypes> AttestationService<T> {
} }
}; };
if expected_subnet != *subnet { if expected_subnet != subnet {
warn!(self.log, "Received an attestation on the wrong subnet"; "subnet_received" => format!("{:?}", subnet), "subnet_expected" => format!("{:?}",expected_subnet), "peer_id" => format!("{}", peer_id)); warn!(self.log, "Received an attestation on the wrong subnet"; "subnet_received" => format!("{:?}", subnet), "subnet_expected" => format!("{:?}",expected_subnet), "peer_id" => format!("{}", peer_id));
return false; return false;
} }

View File

@ -218,11 +218,9 @@ impl<T: BeaconChainTypes> Router<T> {
match gossip_message { match gossip_message {
// Attestations should never reach the router. // Attestations should never reach the router.
PubsubMessage::AggregateAndProofAttestation(aggregate_and_proof) => { PubsubMessage::AggregateAndProofAttestation(aggregate_and_proof) => {
if let Some(gossip_verified) = if let Some(gossip_verified) = self
self.processor.verify_aggregated_attestation_for_gossip( .processor
peer_id.clone(), .verify_aggregated_attestation_for_gossip(peer_id.clone(), *aggregate_and_proof)
*aggregate_and_proof.clone(),
)
{ {
self.propagate_message(id, peer_id.clone()); self.propagate_message(id, peer_id.clone());
self.processor self.processor

View File

@ -300,7 +300,7 @@ fn spawn_service<T: BeaconChainTypes>(
if service.attestation_service.should_process_attestation( if service.attestation_service.should_process_attestation(
&id, &id,
&source, &source,
subnet, *subnet,
attestation, attestation,
) { ) {
let _ = service let _ = service

View File

@ -529,7 +529,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
// otherwise, this is a range sync issue, notify the range sync // otherwise, this is a range sync issue, notify the range sync
self.range_sync self.range_sync
.inject_error(&mut self.network, peer_id.clone(), request_id); .inject_error(&mut self.network, peer_id, request_id);
} }
fn peer_disconnect(&mut self, peer_id: &PeerId) { fn peer_disconnect(&mut self, peer_id: &PeerId) {
@ -667,7 +667,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
"last_peer" => format!("{:?}", parent_request.last_submitted_peer), "last_peer" => format!("{:?}", parent_request.last_submitted_peer),
); );
self.network self.network
.downvote_peer(parent_request.last_submitted_peer.clone()); .downvote_peer(parent_request.last_submitted_peer);
return; return;
} }
Err(e) => { Err(e) => {
@ -677,7 +677,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
"last_peer" => format!("{:?}", parent_request.last_submitted_peer), "last_peer" => format!("{:?}", parent_request.last_submitted_peer),
); );
self.network self.network
.downvote_peer(parent_request.last_submitted_peer.clone()); .downvote_peer(parent_request.last_submitted_peer);
return; return;
} }
} }

View File

@ -467,6 +467,7 @@ pub async fn publish_attestations<T: BeaconChainTypes>(
/// Processes an unaggregrated attestation that was included in a list of attestations with the /// Processes an unaggregrated attestation that was included in a list of attestations with the
/// index `i`. /// index `i`.
#[allow(clippy::redundant_clone)] // false positives in this function.
fn process_unaggregated_attestation<T: BeaconChainTypes>( fn process_unaggregated_attestation<T: BeaconChainTypes>(
beacon_chain: &BeaconChain<T>, beacon_chain: &BeaconChain<T>,
network_chan: NetworkChannel<T::EthSpec>, network_chan: NetworkChannel<T::EthSpec>,
@ -535,6 +536,7 @@ fn process_unaggregated_attestation<T: BeaconChainTypes>(
} }
/// HTTP Handler to publish an Attestation, which has been signed by a validator. /// HTTP Handler to publish an Attestation, which has been signed by a validator.
#[allow(clippy::redundant_clone)] // false positives in this function.
pub async fn publish_aggregate_and_proofs<T: BeaconChainTypes>( pub async fn publish_aggregate_and_proofs<T: BeaconChainTypes>(
req: Request<Body>, req: Request<Body>,
beacon_chain: Arc<BeaconChain<T>>, beacon_chain: Arc<BeaconChain<T>>,
@ -582,6 +584,7 @@ pub async fn publish_aggregate_and_proofs<T: BeaconChainTypes>(
/// Processes an aggregrated attestation that was included in a list of attestations with the index /// Processes an aggregrated attestation that was included in a list of attestations with the index
/// `i`. /// `i`.
#[allow(clippy::redundant_clone)] // false positives in this function.
fn process_aggregated_attestation<T: BeaconChainTypes>( fn process_aggregated_attestation<T: BeaconChainTypes>(
beacon_chain: &BeaconChain<T>, beacon_chain: &BeaconChain<T>,
network_chan: NetworkChannel<T::EthSpec>, network_chan: NetworkChannel<T::EthSpec>,

View File

@ -39,7 +39,7 @@ pub fn get_config<E: EthSpec>(
fs::remove_dir_all( fs::remove_dir_all(
client_config client_config
.get_db_path() .get_db_path()
.ok_or("Failed to get db_path".to_string())?, .ok_or_else(|| "Failed to get db_path".to_string())?,
) )
.map_err(|err| format!("Failed to remove chain_db: {}", err))?; .map_err(|err| format!("Failed to remove chain_db: {}", err))?;
@ -47,7 +47,7 @@ pub fn get_config<E: EthSpec>(
fs::remove_dir_all( fs::remove_dir_all(
client_config client_config
.get_freezer_db_path() .get_freezer_db_path()
.ok_or("Failed to get freezer db path".to_string())?, .ok_or_else(|| "Failed to get freezer db path".to_string())?,
) )
.map_err(|err| format!("Failed to remove chain_db: {}", err))?; .map_err(|err| format!("Failed to remove chain_db: {}", err))?;
@ -287,7 +287,7 @@ pub fn get_config<E: EthSpec>(
if spec_constants != client_config.spec_constants { if spec_constants != client_config.spec_constants {
crit!(log, "Specification constants do not match."; crit!(log, "Specification constants do not match.";
"client_config" => client_config.spec_constants.to_string(), "client_config" => client_config.spec_constants,
"eth2_config" => spec_constants "eth2_config" => spec_constants
); );
return Err("Specification constant mismatch".into()); return Err("Specification constant mismatch".into());

View File

@ -10,12 +10,13 @@ const TESTNET_ID: &str = "witti-v0-11-3";
fn main() { fn main() {
if !base_dir().exists() { if !base_dir().exists() {
std::fs::create_dir_all(base_dir()).expect(&format!("Unable to create {:?}", base_dir())); std::fs::create_dir_all(base_dir())
.unwrap_or_else(|_| panic!("Unable to create {:?}", base_dir()));
match get_all_files() { match get_all_files() {
Ok(()) => (), Ok(()) => (),
Err(e) => { Err(e) => {
std::fs::remove_dir_all(base_dir()).expect(&format!( std::fs::remove_dir_all(base_dir()).unwrap_or_else(|_| panic!(
"{}. Failed to remove {:?}, please remove the directory manually because it may contains incomplete testnet data.", "{}. Failed to remove {:?}, please remove the directory manually because it may contains incomplete testnet data.",
e, e,
base_dir(), base_dir(),

View File

@ -182,7 +182,7 @@ impl<'a> Builder<'a> {
// //
// This allows us to know the RLP data for the eth1 transaction without needing to know // This allows us to know the RLP data for the eth1 transaction without needing to know
// the withdrawal/voting keypairs again at a later date. // the withdrawal/voting keypairs again at a later date.
let path = dir.clone().join(ETH1_DEPOSIT_DATA_FILE); let path = dir.join(ETH1_DEPOSIT_DATA_FILE);
if path.exists() { if path.exists() {
return Err(Error::DepositDataAlreadyExists(path)); return Err(Error::DepositDataAlreadyExists(path));
} else { } else {
@ -191,7 +191,7 @@ impl<'a> Builder<'a> {
.write(true) .write(true)
.read(true) .read(true)
.create(true) .create(true)
.open(path.clone()) .open(path)
.map_err(Error::UnableToSaveDepositData)? .map_err(Error::UnableToSaveDepositData)?
.write_all(hex.as_bytes()) .write_all(hex.as_bytes())
.map_err(Error::UnableToSaveDepositData)? .map_err(Error::UnableToSaveDepositData)?
@ -200,7 +200,7 @@ impl<'a> Builder<'a> {
// Save `ETH1_DEPOSIT_AMOUNT_FILE` to file. // Save `ETH1_DEPOSIT_AMOUNT_FILE` to file.
// //
// This allows us to know the intended deposit amount at a later date. // This allows us to know the intended deposit amount at a later date.
let path = dir.clone().join(ETH1_DEPOSIT_AMOUNT_FILE); let path = dir.join(ETH1_DEPOSIT_AMOUNT_FILE);
if path.exists() { if path.exists() {
return Err(Error::DepositAmountAlreadyExists(path)); return Err(Error::DepositAmountAlreadyExists(path));
} else { } else {
@ -208,7 +208,7 @@ impl<'a> Builder<'a> {
.write(true) .write(true)
.read(true) .read(true)
.create(true) .create(true)
.open(path.clone()) .open(path)
.map_err(Error::UnableToSaveDepositAmount)? .map_err(Error::UnableToSaveDepositAmount)?
.write_all(format!("{}", amount).as_bytes()) .write_all(format!("{}", amount).as_bytes())
.map_err(Error::UnableToSaveDepositAmount)? .map_err(Error::UnableToSaveDepositAmount)?
@ -220,29 +220,24 @@ impl<'a> Builder<'a> {
// Write the withdrawal password to file. // Write the withdrawal password to file.
write_password_to_file( write_password_to_file(
self.password_dir self.password_dir
.clone()
.join(withdrawal_keypair.pk.as_hex_string()), .join(withdrawal_keypair.pk.as_hex_string()),
withdrawal_password.as_bytes(), withdrawal_password.as_bytes(),
)?; )?;
// Write the withdrawal keystore to file. // Write the withdrawal keystore to file.
write_keystore_to_file( write_keystore_to_file(dir.join(WITHDRAWAL_KEYSTORE_FILE), &withdrawal_keystore)?;
dir.clone().join(WITHDRAWAL_KEYSTORE_FILE),
&withdrawal_keystore,
)?;
} }
} }
// Write the voting password to file. // Write the voting password to file.
write_password_to_file( write_password_to_file(
self.password_dir self.password_dir
.clone()
.join(format!("0x{}", voting_keystore.pubkey())), .join(format!("0x{}", voting_keystore.pubkey())),
voting_password.as_bytes(), voting_password.as_bytes(),
)?; )?;
// Write the voting keystore to file. // Write the voting keystore to file.
write_keystore_to_file(dir.clone().join(VOTING_KEYSTORE_FILE), &voting_keystore)?; write_keystore_to_file(dir.join(VOTING_KEYSTORE_FILE), &voting_keystore)?;
ValidatorDir::open(dir).map_err(Error::UnableToOpenDir) ValidatorDir::open(dir).map_err(Error::UnableToOpenDir)
} }
@ -257,7 +252,7 @@ fn write_keystore_to_file(path: PathBuf, keystore: &Keystore) -> Result<(), Erro
.write(true) .write(true)
.read(true) .read(true)
.create_new(true) .create_new(true)
.open(path.clone()) .open(path)
.map_err(Error::UnableToSaveKeystore)?; .map_err(Error::UnableToSaveKeystore)?;
keystore.to_json_writer(file).map_err(Into::into) keystore.to_json_writer(file).map_err(Into::into)

View File

@ -80,10 +80,9 @@ impl ForkChoiceTestDefinition {
finalized_epoch, finalized_epoch,
&justified_state_balances, &justified_state_balances,
) )
.expect(&format!( .unwrap_or_else(|_| {
"find_head op at index {} returned error", panic!("find_head op at index {} returned error", op_index)
op_index });
));
assert_eq!( assert_eq!(
head, expected_head, head, expected_head,
@ -129,10 +128,9 @@ impl ForkChoiceTestDefinition {
justified_epoch, justified_epoch,
finalized_epoch, finalized_epoch,
) )
.expect(&format!( .unwrap_or_else(|_| {
"process_block op at index {} returned error", panic!("process_block op at index {} returned error", op_index)
op_index });
));
check_bytes_round_trip(&fork_choice); check_bytes_round_trip(&fork_choice);
} }
Operation::ProcessAttestation { Operation::ProcessAttestation {
@ -142,10 +140,12 @@ impl ForkChoiceTestDefinition {
} => { } => {
fork_choice fork_choice
.process_attestation(validator_index, block_root, target_epoch) .process_attestation(validator_index, block_root, target_epoch)
.expect(&format!( .unwrap_or_else(|_| {
panic!(
"process_attestation op at index {} returned error", "process_attestation op at index {} returned error",
op_index op_index
)); )
});
check_bytes_round_trip(&fork_choice); check_bytes_round_trip(&fork_choice);
} }
Operation::Prune { Operation::Prune {

View File

@ -91,7 +91,7 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition {
justified_epoch: Epoch::new(2), justified_epoch: Epoch::new(2),
justified_root: get_hash(3), justified_root: get_hash(3),
finalized_epoch: Epoch::new(1), finalized_epoch: Epoch::new(1),
justified_state_balances: balances.clone(), justified_state_balances: balances,
expected_head: get_hash(3), expected_head: get_hash(3),
}); });
@ -421,7 +421,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition {
justified_epoch: Epoch::new(3), justified_epoch: Epoch::new(3),
justified_root: get_hash(2), justified_root: get_hash(2),
finalized_epoch: Epoch::new(0), finalized_epoch: Epoch::new(0),
justified_state_balances: balances.clone(), justified_state_balances: balances,
}); });
// END OF TESTS // END OF TESTS

View File

@ -212,7 +212,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition {
justified_epoch: Epoch::new(2), justified_epoch: Epoch::new(2),
justified_root: get_hash(5), justified_root: get_hash(5),
finalized_epoch: Epoch::new(1), finalized_epoch: Epoch::new(1),
justified_state_balances: balances.clone(), justified_state_balances: balances,
expected_head: get_hash(6), expected_head: get_hash(6),
}, },
]; ];

View File

@ -673,7 +673,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition {
justified_epoch: Epoch::new(2), justified_epoch: Epoch::new(2),
justified_root: get_hash(5), justified_root: get_hash(5),
finalized_epoch: Epoch::new(2), finalized_epoch: Epoch::new(2),
justified_state_balances: balances.clone(), justified_state_balances: balances,
expected_head: get_hash(11), expected_head: get_hash(11),
}); });

View File

@ -83,12 +83,12 @@ impl<T: EthSpec> BeaconBlock<T> {
}; };
let proposer_slashing = ProposerSlashing { let proposer_slashing = ProposerSlashing {
signed_header_1: signed_header.clone(), signed_header_1: signed_header.clone(),
signed_header_2: signed_header.clone(), signed_header_2: signed_header,
}; };
let attester_slashing = AttesterSlashing { let attester_slashing = AttesterSlashing {
attestation_1: indexed_attestation.clone(), attestation_1: indexed_attestation.clone(),
attestation_2: indexed_attestation.clone(), attestation_2: indexed_attestation,
}; };
let attestation: Attestation<T> = Attestation { let attestation: Attestation<T> = Attestation {

View File

@ -190,7 +190,7 @@ impl Keystore {
let keypair = keypair_from_secret(plain_text.as_bytes())?; let keypair = keypair_from_secret(plain_text.as_bytes())?;
// Verify that the derived `PublicKey` matches `self`. // Verify that the derived `PublicKey` matches `self`.
if keypair.pk.as_hex_string()[2..].to_string() != self.json.pubkey { if keypair.pk.as_hex_string()[2..] != self.json.pubkey {
return Err(Error::PublicKeyMismatch); return Err(Error::PublicKeyMismatch);
} }

View File

@ -271,7 +271,7 @@ impl<E: EthSpec> Environment<E> {
executor: TaskExecutor { executor: TaskExecutor {
exit: self.exit.clone(), exit: self.exit.clone(),
handle: self.runtime().handle().clone(), handle: self.runtime().handle().clone(),
log: self.log.new(o!("service" => service_name.clone())), log: self.log.new(o!("service" => service_name)),
}, },
eth_spec_instance: self.eth_spec_instance.clone(), eth_spec_instance: self.eth_spec_instance.clone(),
eth2_config: self.eth2_config.clone(), eth2_config: self.eth2_config.clone(),

View File

@ -230,12 +230,12 @@ impl DutiesStore {
.collect() .collect()
} }
fn is_aggregator(&self, validator_pubkey: &PublicKey, epoch: &Epoch) -> Option<bool> { fn is_aggregator(&self, validator_pubkey: &PublicKey, epoch: Epoch) -> Option<bool> {
Some( Some(
self.store self.store
.read() .read()
.get(validator_pubkey)? .get(validator_pubkey)?
.get(epoch)? .get(&epoch)?
.selection_proof .selection_proof
.is_some(), .is_some(),
) )
@ -602,7 +602,7 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> {
// The selection proof is computed on `store.insert`, so it's necessary to check // The selection proof is computed on `store.insert`, so it's necessary to check
// with the store that the validator is an aggregator. // with the store that the validator is an aggregator.
let is_aggregator = self.store.is_aggregator(&validator_pubkey, &epoch)?; let is_aggregator = self.store.is_aggregator(&validator_pubkey, epoch)?;
if outcome.is_subscription_candidate() { if outcome.is_subscription_candidate() {
Some(ValidatorSubscription { Some(ValidatorSubscription {