diff --git a/Cargo.lock b/Cargo.lock index 92df3d931..ca5c875d6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -336,7 +336,6 @@ dependencies = [ "slog-term", "store", "tokio 0.2.21", - "toml", "types", "version", ] diff --git a/Makefile b/Makefile index 7f8ea3052..76da24603 100644 --- a/Makefile +++ b/Makefile @@ -51,9 +51,9 @@ test: test-release test-full: cargo-fmt test-release test-debug test-ef # Lints the code for bad style and potentially unsafe arithmetic using Clippy. -# Clippy lints are opt-in per-crate for now, which is why we allow all by default. +# Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints. lint: - cargo clippy --all -- -A clippy::all + cargo clippy --all -- -A clippy::all -D clippy::perf -D clippy::correctness # Runs the makefile in the `ef_tests` repo. # diff --git a/beacon_node/beacon_chain/src/observed_attesters.rs b/beacon_node/beacon_chain/src/observed_attesters.rs index bc428fd5d..8cc3f5070 100644 --- a/beacon_node/beacon_chain/src/observed_attesters.rs +++ b/beacon_node/beacon_chain/src/observed_attesters.rs @@ -197,7 +197,7 @@ impl AutoPruningContainer { .map(|(_epoch, item)| item.len()) .fold((0, 0), |(count, sum), len| (count + 1, sum + len)); - let initial_capacity = sum.checked_div(count).unwrap_or(T::default_capacity()); + let initial_capacity = sum.checked_div(count).unwrap_or_else(T::default_capacity); let mut item = T::with_capacity(initial_capacity); item.insert(validator_index); diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 0565f8c4e..f06d51112 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -444,7 +444,7 @@ where }; let sk = &self.keypairs[proposer_index].sk; - let fork = &state.fork.clone(); + let fork = &state.fork; let randao_reveal = { let epoch = slot.epoch(E::slots_per_epoch()); @@ -605,7 +605,7 @@ where selection_proof.is_aggregator(bc.committee.len(), spec).unwrap_or(false) }) .copied() - .expect(&format!( + .unwrap_or_else(|| panic!( "Committee {} at slot {} with {} attesting validators does not have any aggregators", bc.index, state.slot, bc.committee.len() )); diff --git a/beacon_node/eth2_libp2p/src/behaviour/handler/delegate.rs b/beacon_node/eth2_libp2p/src/behaviour/handler/delegate.rs index b20fe5170..2863efd85 100644 --- a/beacon_node/eth2_libp2p/src/behaviour/handler/delegate.rs +++ b/beacon_node/eth2_libp2p/src/behaviour/handler/delegate.rs @@ -70,7 +70,7 @@ pub enum DelegateIn { pub enum DelegateOut { Gossipsub(::OutEvent), RPC( as ProtocolsHandler>::OutEvent), - Identify(::OutEvent), + Identify(Box<::OutEvent>), } /// Wrapper around the `ProtocolsHandler::Error` types of the handlers.
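Note: the `observed_attesters.rs` and `test_utils.rs` hunks above swap eager `unwrap_or(...)` / `expect(&format!(...))` calls for their lazy `_else` counterparts, which is what `clippy::or_fun_call` asks for. A minimal sketch of the idea, with a toy `default_capacity` function standing in for the real trait method:

```rust
fn default_capacity() -> usize {
    // Pretend this fallback is expensive to compute.
    128
}

fn main() {
    let sum: usize = 640;
    let count: usize = 0;

    // Eager: `default_capacity()` runs even when the division succeeds.
    let eager = sum.checked_div(count).unwrap_or(default_capacity());

    // Lazy: the fallback only runs when `checked_div` returns `None`.
    let lazy = sum.checked_div(count).unwrap_or_else(default_capacity);

    assert_eq!(eager, lazy);
}
```

The same reasoning applies to the `expect(&format!(...))` sites: `format!` builds (and allocates) the panic message on every call, while `unwrap_or_else(|| panic!(...))` only formats it on the failure path.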
@@ -342,7 +342,9 @@ impl ProtocolsHandler for DelegatingHandler { match self.identify_handler.poll(cx) { Poll::Ready(ProtocolsHandlerEvent::Custom(event)) => { - return Poll::Ready(ProtocolsHandlerEvent::Custom(DelegateOut::Identify(event))); + return Poll::Ready(ProtocolsHandlerEvent::Custom(DelegateOut::Identify( + Box::new(event), + ))); } Poll::Ready(ProtocolsHandlerEvent::Close(event)) => { return Poll::Ready(ProtocolsHandlerEvent::Close(DelegateError::Identify(event))); diff --git a/beacon_node/eth2_libp2p/src/behaviour/handler/mod.rs b/beacon_node/eth2_libp2p/src/behaviour/handler/mod.rs index 64d0bcaca..f87ba6dbe 100644 --- a/beacon_node/eth2_libp2p/src/behaviour/handler/mod.rs +++ b/beacon_node/eth2_libp2p/src/behaviour/handler/mod.rs @@ -42,7 +42,7 @@ pub enum BehaviourHandlerIn { } pub enum BehaviourHandlerOut { - Delegate(DelegateOut), + Delegate(Box>), // TODO: replace custom with events to send Custom, } @@ -119,7 +119,7 @@ impl ProtocolsHandler for BehaviourHandler { match self.delegate.poll(cx) { Poll::Ready(ProtocolsHandlerEvent::Custom(event)) => { return Poll::Ready(ProtocolsHandlerEvent::Custom( - BehaviourHandlerOut::Delegate(event), + BehaviourHandlerOut::Delegate(Box::new(event)), )) } Poll::Ready(ProtocolsHandlerEvent::Close(err)) => { diff --git a/beacon_node/eth2_libp2p/src/behaviour/mod.rs b/beacon_node/eth2_libp2p/src/behaviour/mod.rs index 1aca8a5dd..cd54c7f6d 100644 --- a/beacon_node/eth2_libp2p/src/behaviour/mod.rs +++ b/beacon_node/eth2_libp2p/src/behaviour/mod.rs @@ -160,10 +160,10 @@ impl NetworkBehaviour for Behaviour { ) { match event { // Events comming from the handler, redirected to each behaviour - BehaviourHandlerOut::Delegate(delegate) => match delegate { + BehaviourHandlerOut::Delegate(delegate) => match *delegate { DelegateOut::Gossipsub(ev) => self.gossipsub.inject_event(peer_id, conn_id, ev), DelegateOut::RPC(ev) => self.eth2_rpc.inject_event(peer_id, conn_id, ev), - DelegateOut::Identify(ev) => self.identify.inject_event(peer_id, conn_id, ev), + DelegateOut::Identify(ev) => self.identify.inject_event(peer_id, conn_id, *ev), }, /* Custom events sent BY the handler */ BehaviourHandlerOut::Custom => { diff --git a/beacon_node/eth2_libp2p/src/discovery/mod.rs b/beacon_node/eth2_libp2p/src/discovery/mod.rs index 5b703d314..aae58d15d 100644 --- a/beacon_node/eth2_libp2p/src/discovery/mod.rs +++ b/beacon_node/eth2_libp2p/src/discovery/mod.rs @@ -51,7 +51,7 @@ const FIND_NODE_QUERY_CLOSEST_PEERS: usize = 16; pub enum DiscoveryEvent { /// A query has completed. The first parameter is the `min_ttl` of the peers if it is specified /// and the second parameter are the discovered peers. - QueryResult(Option, Box>), + QueryResult(Option, Vec), /// This indicates that our local UDP socketaddr has been updated and we should inform libp2p. 
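Note: the handler changes above wrap the `Identify` and `Delegate` payloads in `Box` so that one oversized variant does not inflate every value of the enum (`clippy::large_enum_variant`), and then dereference the box once at the `match` site. A rough sketch with made-up payload types, not the actual libp2p event types:

```rust
struct BigEvent {
    payload: [u8; 1024],
}

// Every `Unboxed` value is at least 1 KiB, even the small variants.
#[allow(dead_code)]
enum Unboxed {
    Small(u8),
    Big(BigEvent),
}

// Boxing the large payload keeps the enum close to pointer-sized.
enum Boxed {
    Small(u8),
    Big(Box<BigEvent>),
}

fn handle(event: Boxed) {
    // Deref once where the event is consumed, like `match *delegate { .. }` above.
    match event {
        Boxed::Small(n) => println!("small event: {}", n),
        Boxed::Big(big) => println!("big event: {} bytes", big.payload.len()),
    }
}

fn main() {
    assert!(std::mem::size_of::<Boxed>() < std::mem::size_of::<Unboxed>());
    handle(Boxed::Big(Box::new(BigEvent { payload: [0; 1024] })));
    handle(Boxed::Small(7));
}
```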
SocketUpdated(SocketAddr), } @@ -328,7 +328,7 @@ impl Discovery { .enr_insert(BITFIELD_ENR_KEY, current_bitfield.as_ssz_bytes()); // replace the global version - *self.network_globals.local_enr.write() = self.discv5.local_enr().clone(); + *self.network_globals.local_enr.write() = self.discv5.local_enr(); Ok(()) } @@ -360,7 +360,7 @@ impl Discovery { }); // replace the global version with discovery version - *self.network_globals.local_enr.write() = self.discv5.local_enr().clone(); + *self.network_globals.local_enr.write() = self.discv5.local_enr(); } /* Internal Functions */ @@ -602,7 +602,7 @@ impl Discovery { self.cached_enrs.put(enr.peer_id(), enr); } // return the result to the peer manager - return Poll::Ready(DiscoveryEvent::QueryResult(min_ttl, Box::new(result))); + return Poll::Ready(DiscoveryEvent::QueryResult(min_ttl, result)); } // Process the server event stream diff --git a/beacon_node/eth2_libp2p/src/peer_manager/mod.rs b/beacon_node/eth2_libp2p/src/peer_manager/mod.rs index 5d8720289..c384bfb31 100644 --- a/beacon_node/eth2_libp2p/src/peer_manager/mod.rs +++ b/beacon_node/eth2_libp2p/src/peer_manager/mod.rs @@ -422,7 +422,7 @@ impl PeerManager { /// with a new `PeerId` which involves a discovery routing table lookup. We could dial the /// multiaddr here, however this could relate to duplicate PeerId's etc. If the lookup /// proves resource constraining, we should switch to multiaddr dialling here. - fn peers_discovered(&mut self, peers: Vec, min_ttl: Option) { + fn peers_discovered(&mut self, peers: &[Enr], min_ttl: Option) { for enr in peers { let peer_id = enr.peer_id(); @@ -623,7 +623,7 @@ impl Stream for PeerManager { match event { DiscoveryEvent::SocketUpdated(socket_addr) => self.socket_updated(socket_addr), DiscoveryEvent::QueryResult(min_ttl, peers) => { - self.peers_discovered(*peers, min_ttl) + self.peers_discovered(&peers, min_ttl) } } } diff --git a/beacon_node/eth2_libp2p/src/rpc/handler.rs b/beacon_node/eth2_libp2p/src/rpc/handler.rs index ea90fafad..59ccbc28b 100644 --- a/beacon_node/eth2_libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2_libp2p/src/rpc/handler.rs @@ -169,7 +169,7 @@ where /// A response has been sent, pending writing. ResponsePendingSend { /// The substream used to send the response - substream: InboundFramed, + substream: Box>, /// The message that is attempting to be sent. message: RPCCodedResponse, /// Whether a stream termination is requested. If true the stream will be closed after @@ -180,7 +180,7 @@ where /// A response has been sent, pending flush. ResponsePendingFlush { /// The substream used to send the response - substream: InboundFramed, + substream: Box>, /// Whether a stream termination is requested. If true the stream will be closed after /// this send. Otherwise it will transition to an idle state until a stream termination is /// requested or a timeout is reached. @@ -188,9 +188,9 @@ where }, /// The response stream is idle and awaiting input from the application to send more chunked /// responses. - ResponseIdle(InboundFramed), + ResponseIdle(Box>), /// The substream is attempting to shutdown. - Closing(InboundFramed), + Closing(Box>), /// Temporary state during processing Poisoned, } @@ -201,12 +201,12 @@ pub enum OutboundSubstreamState { /// handler because GOODBYE requests can be handled and responses dropped instantly. RequestPendingResponse { /// The framed negotiated substream. - substream: OutboundFramed, + substream: Box>, /// Keeps track of the actual request sent. 
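Note: `peers_discovered` above now borrows `&[Enr]` instead of taking ownership of the `Vec`, since the peer manager only iterates the results. A small sketch of that signature shape, using a stand-in `Enr` struct rather than the discv5 type:

```rust
struct Enr {
    node_id: u64,
}

// Borrowing a slice lets the caller keep ownership and avoids an
// unnecessary move (or clone) just to read the entries.
fn peers_discovered(peers: &[Enr]) {
    for enr in peers {
        println!("discovered node {}", enr.node_id);
    }
}

fn main() {
    let peers = vec![Enr { node_id: 1 }, Enr { node_id: 2 }];
    // A `&Vec<Enr>` coerces to `&[Enr]` at the call site.
    peers_discovered(&peers);
    // `peers` is still usable here because it was only borrowed.
    assert_eq!(peers.len(), 2);
}
```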
request: RPCRequest, }, /// Closing an outbound substream> - Closing(OutboundFramed), + Closing(Box>), /// Temporary state during processing Poisoned, } @@ -551,7 +551,7 @@ where self.current_inbound_substream_id, Duration::from_secs(RESPONSE_TIMEOUT), ); - let awaiting_stream = InboundSubstreamState::ResponseIdle(substream); + let awaiting_stream = InboundSubstreamState::ResponseIdle(Box::new(substream)); self.inbound_substreams.insert( self.current_inbound_substream_id, (awaiting_stream, Some(delay_key), req.protocol()), @@ -593,7 +593,7 @@ where Duration::from_secs(RESPONSE_TIMEOUT), ); let awaiting_stream = OutboundSubstreamState::RequestPendingResponse { - substream: out, + substream: Box::new(out), request, }; let expected_responses = if expected_responses > 1 { @@ -833,7 +833,7 @@ where // await flush entry.get_mut().0 = InboundSubstreamState::ResponsePendingFlush { - substream, + substream: substream, closing, }; drive_stream_further = true; @@ -853,7 +853,7 @@ where } else { // check for queued chunks and update the stream entry.get_mut().0 = apply_queued_responses( - substream, + *substream, &mut self .queued_outbound_items .get_mut(&request_id), @@ -908,7 +908,7 @@ where } else { // check for queued chunks and update the stream entry.get_mut().0 = apply_queued_responses( - substream, + *substream, &mut self .queued_outbound_items .get_mut(&request_id), @@ -942,7 +942,7 @@ where InboundSubstreamState::ResponseIdle(substream) => { if !deactivated { entry.get_mut().0 = apply_queued_responses( - substream, + *substream, &mut self.queued_outbound_items.get_mut(&request_id), &mut drive_stream_further, ); @@ -1190,10 +1190,10 @@ fn apply_queued_responses( match queue.remove(0) { RPCCodedResponse::StreamTermination(_) => { // close the stream if this is a stream termination - InboundSubstreamState::Closing(substream) + InboundSubstreamState::Closing(Box::new(substream)) } chunk => InboundSubstreamState::ResponsePendingSend { - substream, + substream: Box::new(substream), message: chunk, closing: false, }, @@ -1201,7 +1201,7 @@ fn apply_queued_responses( } _ => { // no items queued set to idle - InboundSubstreamState::ResponseIdle(substream) + InboundSubstreamState::ResponseIdle(Box::new(substream)) } } } diff --git a/beacon_node/network/src/attestation_service/mod.rs b/beacon_node/network/src/attestation_service/mod.rs index 5626e32ce..d6591eb40 100644 --- a/beacon_node/network/src/attestation_service/mod.rs +++ b/beacon_node/network/src/attestation_service/mod.rs @@ -237,7 +237,7 @@ impl AttestationService { &mut self, _message_id: &MessageId, peer_id: &PeerId, - subnet: &SubnetId, + subnet: SubnetId, attestation: &Attestation, ) -> bool { // verify the attestation is on the correct subnet @@ -249,7 +249,7 @@ impl AttestationService { } }; - if expected_subnet != *subnet { + if expected_subnet != subnet { warn!(self.log, "Received an attestation on the wrong subnet"; "subnet_received" => format!("{:?}", subnet), "subnet_expected" => format!("{:?}",expected_subnet), "peer_id" => format!("{}", peer_id)); return false; } diff --git a/beacon_node/network/src/router/mod.rs b/beacon_node/network/src/router/mod.rs index 12a00ae28..d9f00d86f 100644 --- a/beacon_node/network/src/router/mod.rs +++ b/beacon_node/network/src/router/mod.rs @@ -218,11 +218,9 @@ impl Router { match gossip_message { // Attestations should never reach the router. 
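Note: `should_process_attestation` above now takes the subnet id by value rather than by reference; for a small `Copy` newtype the reference is no cheaper than the value and only forces dereferences at each comparison (`clippy::trivially_copy_pass_by_ref`). A minimal illustration with a stand-in newtype:

```rust
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct SubnetId(u64);

// The id is a single u64 under the hood, so copying it costs the same
// as copying a reference to it would.
fn is_expected_subnet(expected: SubnetId, received: SubnetId) -> bool {
    // No `*received` dereference needed, unlike the `&SubnetId` version.
    expected == received
}

fn main() {
    let expected = SubnetId(3);
    let received = SubnetId(4);
    if !is_expected_subnet(expected, received) {
        println!("attestation arrived on the wrong subnet: {:?}", received);
    }
    // The caller can keep using both values because they are `Copy`.
    assert_eq!(expected, SubnetId(3));
}
```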
PubsubMessage::AggregateAndProofAttestation(aggregate_and_proof) => { - if let Some(gossip_verified) = - self.processor.verify_aggregated_attestation_for_gossip( - peer_id.clone(), - *aggregate_and_proof.clone(), - ) + if let Some(gossip_verified) = self + .processor + .verify_aggregated_attestation_for_gossip(peer_id.clone(), *aggregate_and_proof) { self.propagate_message(id, peer_id.clone()); self.processor diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index b928145bd..2071705e6 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -300,7 +300,7 @@ fn spawn_service( if service.attestation_service.should_process_attestation( &id, &source, - subnet, + *subnet, attestation, ) { let _ = service diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 0029d6ed0..7017d502b 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -529,7 +529,7 @@ impl SyncManager { // otherwise, this is a range sync issue, notify the range sync self.range_sync - .inject_error(&mut self.network, peer_id.clone(), request_id); + .inject_error(&mut self.network, peer_id, request_id); } fn peer_disconnect(&mut self, peer_id: &PeerId) { @@ -667,7 +667,7 @@ impl SyncManager { "last_peer" => format!("{:?}", parent_request.last_submitted_peer), ); self.network - .downvote_peer(parent_request.last_submitted_peer.clone()); + .downvote_peer(parent_request.last_submitted_peer); return; } Err(e) => { @@ -677,7 +677,7 @@ impl SyncManager { "last_peer" => format!("{:?}", parent_request.last_submitted_peer), ); self.network - .downvote_peer(parent_request.last_submitted_peer.clone()); + .downvote_peer(parent_request.last_submitted_peer); return; } } diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index d79ca506f..f0e8026a8 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -467,6 +467,7 @@ pub async fn publish_attestations( /// Processes an unaggregrated attestation that was included in a list of attestations with the /// index `i`. +#[allow(clippy::redundant_clone)] // false positives in this function. fn process_unaggregated_attestation( beacon_chain: &BeaconChain, network_chan: NetworkChannel, @@ -535,6 +536,7 @@ fn process_unaggregated_attestation( } /// HTTP Handler to publish an Attestation, which has been signed by a validator. +#[allow(clippy::redundant_clone)] // false positives in this function. pub async fn publish_aggregate_and_proofs( req: Request, beacon_chain: Arc>, @@ -582,6 +584,7 @@ pub async fn publish_aggregate_and_proofs( /// Processes an aggregrated attestation that was included in a list of attestations with the index /// `i`. +#[allow(clippy::redundant_clone)] // false positives in this function. 
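Note: the `rest_api` hunks keep `clippy::redundant_clone` active for the crate but silence it on the few functions where it misfires. Roughly, a function-scoped `#[allow]` looks like the toy below; the function body is illustrative only, not the Lighthouse handler:

```rust
// Silences a single lint for this function only; the rest of the crate is
// still checked. A short note on *why* keeps the allow auditable.
#[allow(clippy::redundant_clone)] // e.g. the lint misfires on a clone in here.
fn process_message(message: String) -> (String, usize) {
    let copy = message.clone();
    (copy, message.len())
}

fn main() {
    let (text, len) = process_message("hello".to_string());
    println!("{} ({} bytes)", text, len);
}
```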
fn process_aggregated_attestation( beacon_chain: &BeaconChain, network_chan: NetworkChannel, diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 932708a36..b1a8e7065 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -39,7 +39,7 @@ pub fn get_config( fs::remove_dir_all( client_config .get_db_path() - .ok_or("Failed to get db_path".to_string())?, + .ok_or_else(|| "Failed to get db_path".to_string())?, ) .map_err(|err| format!("Failed to remove chain_db: {}", err))?; @@ -47,7 +47,7 @@ pub fn get_config( fs::remove_dir_all( client_config .get_freezer_db_path() - .ok_or("Failed to get freezer db path".to_string())?, + .ok_or_else(|| "Failed to get freezer db path".to_string())?, ) .map_err(|err| format!("Failed to remove chain_db: {}", err))?; @@ -287,7 +287,7 @@ pub fn get_config( if spec_constants != client_config.spec_constants { crit!(log, "Specification constants do not match."; - "client_config" => client_config.spec_constants.to_string(), + "client_config" => client_config.spec_constants, "eth2_config" => spec_constants ); return Err("Specification constant mismatch".into()); diff --git a/common/eth2_testnet_config/build.rs b/common/eth2_testnet_config/build.rs index 7400d05a3..b8a3d5947 100644 --- a/common/eth2_testnet_config/build.rs +++ b/common/eth2_testnet_config/build.rs @@ -10,12 +10,13 @@ const TESTNET_ID: &str = "witti-v0-11-3"; fn main() { if !base_dir().exists() { - std::fs::create_dir_all(base_dir()).expect(&format!("Unable to create {:?}", base_dir())); + std::fs::create_dir_all(base_dir()) + .unwrap_or_else(|_| panic!("Unable to create {:?}", base_dir())); match get_all_files() { Ok(()) => (), Err(e) => { - std::fs::remove_dir_all(base_dir()).expect(&format!( + std::fs::remove_dir_all(base_dir()).unwrap_or_else(|_| panic!( "{}. Failed to remove {:?}, please remove the directory manually because it may contains incomplete testnet data.", e, base_dir(), diff --git a/common/validator_dir/src/builder.rs b/common/validator_dir/src/builder.rs index 9f29f3dc0..a33d910b5 100644 --- a/common/validator_dir/src/builder.rs +++ b/common/validator_dir/src/builder.rs @@ -182,7 +182,7 @@ impl<'a> Builder<'a> { // // This allows us to know the RLP data for the eth1 transaction without needing to know // the withdrawal/voting keypairs again at a later date. - let path = dir.clone().join(ETH1_DEPOSIT_DATA_FILE); + let path = dir.join(ETH1_DEPOSIT_DATA_FILE); if path.exists() { return Err(Error::DepositDataAlreadyExists(path)); } else { @@ -191,7 +191,7 @@ impl<'a> Builder<'a> { .write(true) .read(true) .create(true) - .open(path.clone()) + .open(path) .map_err(Error::UnableToSaveDepositData)? .write_all(hex.as_bytes()) .map_err(Error::UnableToSaveDepositData)? @@ -200,7 +200,7 @@ impl<'a> Builder<'a> { // Save `ETH1_DEPOSIT_AMOUNT_FILE` to file. // // This allows us to know the intended deposit amount at a later date. - let path = dir.clone().join(ETH1_DEPOSIT_AMOUNT_FILE); + let path = dir.join(ETH1_DEPOSIT_AMOUNT_FILE); if path.exists() { return Err(Error::DepositAmountAlreadyExists(path)); } else { @@ -208,7 +208,7 @@ impl<'a> Builder<'a> { .write(true) .read(true) .create(true) - .open(path.clone()) + .open(path) .map_err(Error::UnableToSaveDepositAmount)? .write_all(format!("{}", amount).as_bytes()) .map_err(Error::UnableToSaveDepositAmount)? @@ -220,29 +220,24 @@ impl<'a> Builder<'a> { // Write the withdrawal password to file. 
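Note: the `build.rs` change above is the `clippy::expect_fun_call` pattern: `expect` takes an already-built message, so the `format!` allocation happens even when the `Result` is `Ok`, whereas `unwrap_or_else(|_| panic!(...))` defers it to the failure path. A compact sketch of the two forms, using a throwaway directory path rather than the real testnet directory:

```rust
use std::path::PathBuf;

fn base_dir() -> PathBuf {
    // Stand-in for the testnet directory used by the real build script.
    PathBuf::from("target/example-testnet-dir")
}

fn main() {
    // Eager: the panic message is formatted even when create_dir_all succeeds.
    std::fs::create_dir_all(base_dir())
        .expect(&format!("Unable to create {:?}", base_dir()));

    // Lazy: the message is only built if the call actually fails.
    std::fs::create_dir_all(base_dir())
        .unwrap_or_else(|_| panic!("Unable to create {:?}", base_dir()));
}
```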
write_password_to_file( self.password_dir - .clone() .join(withdrawal_keypair.pk.as_hex_string()), withdrawal_password.as_bytes(), )?; // Write the withdrawal keystore to file. - write_keystore_to_file( - dir.clone().join(WITHDRAWAL_KEYSTORE_FILE), - &withdrawal_keystore, - )?; + write_keystore_to_file(dir.join(WITHDRAWAL_KEYSTORE_FILE), &withdrawal_keystore)?; } } // Write the voting password to file. write_password_to_file( self.password_dir - .clone() .join(format!("0x{}", voting_keystore.pubkey())), voting_password.as_bytes(), )?; // Write the voting keystore to file. - write_keystore_to_file(dir.clone().join(VOTING_KEYSTORE_FILE), &voting_keystore)?; + write_keystore_to_file(dir.join(VOTING_KEYSTORE_FILE), &voting_keystore)?; ValidatorDir::open(dir).map_err(Error::UnableToOpenDir) } @@ -257,7 +252,7 @@ fn write_keystore_to_file(path: PathBuf, keystore: &Keystore) -> Result<(), Erro .write(true) .read(true) .create_new(true) - .open(path.clone()) + .open(path) .map_err(Error::UnableToSaveKeystore)?; keystore.to_json_writer(file).map_err(Into::into) diff --git a/consensus/proto_array_fork_choice/src/fork_choice_test_definition.rs b/consensus/proto_array_fork_choice/src/fork_choice_test_definition.rs index c74e33c45..25da695e2 100644 --- a/consensus/proto_array_fork_choice/src/fork_choice_test_definition.rs +++ b/consensus/proto_array_fork_choice/src/fork_choice_test_definition.rs @@ -80,10 +80,9 @@ impl ForkChoiceTestDefinition { finalized_epoch, &justified_state_balances, ) - .expect(&format!( - "find_head op at index {} returned error", - op_index - )); + .unwrap_or_else(|_| { + panic!("find_head op at index {} returned error", op_index) + }); assert_eq!( head, expected_head, @@ -129,10 +128,9 @@ impl ForkChoiceTestDefinition { justified_epoch, finalized_epoch, ) - .expect(&format!( - "process_block op at index {} returned error", - op_index - )); + .unwrap_or_else(|_| { + panic!("process_block op at index {} returned error", op_index) + }); check_bytes_round_trip(&fork_choice); } Operation::ProcessAttestation { @@ -142,10 +140,12 @@ impl ForkChoiceTestDefinition { } => { fork_choice .process_attestation(validator_index, block_root, target_epoch) - .expect(&format!( - "process_attestation op at index {} returned error", - op_index - )); + .unwrap_or_else(|_| { + panic!( + "process_attestation op at index {} returned error", + op_index + ) + }); check_bytes_round_trip(&fork_choice); } Operation::Prune { diff --git a/consensus/proto_array_fork_choice/src/fork_choice_test_definition/ffg_updates.rs b/consensus/proto_array_fork_choice/src/fork_choice_test_definition/ffg_updates.rs index 9dd9417f2..4b7eb25d7 100644 --- a/consensus/proto_array_fork_choice/src/fork_choice_test_definition/ffg_updates.rs +++ b/consensus/proto_array_fork_choice/src/fork_choice_test_definition/ffg_updates.rs @@ -91,7 +91,7 @@ pub fn get_ffg_case_01_test_definition() -> ForkChoiceTestDefinition { justified_epoch: Epoch::new(2), justified_root: get_hash(3), finalized_epoch: Epoch::new(1), - justified_state_balances: balances.clone(), + justified_state_balances: balances, expected_head: get_hash(3), }); @@ -421,7 +421,7 @@ pub fn get_ffg_case_02_test_definition() -> ForkChoiceTestDefinition { justified_epoch: Epoch::new(3), justified_root: get_hash(2), finalized_epoch: Epoch::new(0), - justified_state_balances: balances.clone(), + justified_state_balances: balances, }); // END OF TESTS diff --git a/consensus/proto_array_fork_choice/src/fork_choice_test_definition/no_votes.rs 
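Note: several `validator_dir` hunks above simply drop `.clone()` calls: `Path::join` borrows `&self` and `OpenOptions::open` only needs an `AsRef<Path>`, so cloning the `PathBuf` first is a wasted allocation (`clippy::redundant_clone`). A small sketch with a scratch path, not the validator directory layout:

```rust
use std::fs::OpenOptions;
use std::io::Write;
use std::path::Path;

fn main() -> std::io::Result<()> {
    let dir = Path::new("target").to_path_buf();

    // `join` borrows `dir`, so no clone is needed to build the child path,
    // and `open` accepts anything that implements `AsRef<Path>`.
    let path = dir.join("example-deposit-data.txt");

    OpenOptions::new()
        .write(true)
        .create(true)
        .open(&path)?
        .write_all(b"0x00")?;

    // Both `dir` and `path` remain usable afterwards.
    println!("wrote {}", path.display());
    Ok(())
}
```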
b/consensus/proto_array_fork_choice/src/fork_choice_test_definition/no_votes.rs index 279cde52c..e42abe288 100644 --- a/consensus/proto_array_fork_choice/src/fork_choice_test_definition/no_votes.rs +++ b/consensus/proto_array_fork_choice/src/fork_choice_test_definition/no_votes.rs @@ -212,7 +212,7 @@ pub fn get_no_votes_test_definition() -> ForkChoiceTestDefinition { justified_epoch: Epoch::new(2), justified_root: get_hash(5), finalized_epoch: Epoch::new(1), - justified_state_balances: balances.clone(), + justified_state_balances: balances, expected_head: get_hash(6), }, ]; diff --git a/consensus/proto_array_fork_choice/src/fork_choice_test_definition/votes.rs b/consensus/proto_array_fork_choice/src/fork_choice_test_definition/votes.rs index 4f8091269..ac9513c5f 100644 --- a/consensus/proto_array_fork_choice/src/fork_choice_test_definition/votes.rs +++ b/consensus/proto_array_fork_choice/src/fork_choice_test_definition/votes.rs @@ -673,7 +673,7 @@ pub fn get_votes_test_definition() -> ForkChoiceTestDefinition { justified_epoch: Epoch::new(2), justified_root: get_hash(5), finalized_epoch: Epoch::new(2), - justified_state_balances: balances.clone(), + justified_state_balances: balances, expected_head: get_hash(11), }); diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 2eee8b8e7..75c957b91 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -83,12 +83,12 @@ impl BeaconBlock { }; let proposer_slashing = ProposerSlashing { signed_header_1: signed_header.clone(), - signed_header_2: signed_header.clone(), + signed_header_2: signed_header, }; let attester_slashing = AttesterSlashing { attestation_1: indexed_attestation.clone(), - attestation_2: indexed_attestation.clone(), + attestation_2: indexed_attestation, }; let attestation: Attestation = Attestation { diff --git a/crypto/eth2_keystore/src/keystore.rs b/crypto/eth2_keystore/src/keystore.rs index 04c0655a6..37b6ace86 100644 --- a/crypto/eth2_keystore/src/keystore.rs +++ b/crypto/eth2_keystore/src/keystore.rs @@ -190,7 +190,7 @@ impl Keystore { let keypair = keypair_from_secret(plain_text.as_bytes())?; // Verify that the derived `PublicKey` matches `self`. - if keypair.pk.as_hex_string()[2..].to_string() != self.json.pubkey { + if keypair.pk.as_hex_string()[2..] != self.json.pubkey { return Err(Error::PublicKeyMismatch); } diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index 119f3247a..1772cfeec 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -271,7 +271,7 @@ impl Environment { executor: TaskExecutor { exit: self.exit.clone(), handle: self.runtime().handle().clone(), - log: self.log.new(o!("service" => service_name.clone())), + log: self.log.new(o!("service" => service_name)), }, eth_spec_instance: self.eth_spec_instance.clone(), eth2_config: self.eth2_config.clone(), diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index b7c0c0876..b98bb3c20 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -230,12 +230,12 @@ impl DutiesStore { .collect() } - fn is_aggregator(&self, validator_pubkey: &PublicKey, epoch: &Epoch) -> Option { + fn is_aggregator(&self, validator_pubkey: &PublicKey, epoch: Epoch) -> Option { Some( self.store .read() .get(validator_pubkey)? - .get(epoch)? + .get(&epoch)? 
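Note: the `keystore.rs` change above compares the hex slice against the stored pubkey directly instead of calling `.to_string()` first; the standard library already provides `impl PartialEq<String> for str`, so the extra allocation buys nothing (`clippy::cmp_owned`). A tiny sketch with dummy hex strings:

```rust
fn main() {
    let stored_pubkey = String::from("ab12cd");
    let hex = String::from("0xab12cd");

    // Allocates a new String just to throw it away after the comparison.
    let with_alloc = hex[2..].to_string() != stored_pubkey;

    // `str` compares against `String` directly, no allocation needed.
    let without_alloc = hex[2..] != stored_pubkey;

    assert_eq!(with_alloc, without_alloc);
    assert!(!without_alloc, "pubkeys should match");
}
```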
.selection_proof .is_some(), ) @@ -602,7 +602,7 @@ impl DutiesService { // The selection proof is computed on `store.insert`, so it's necessary to check // with the store that the validator is an aggregator. - let is_aggregator = self.store.is_aggregator(&validator_pubkey, &epoch)?; + let is_aggregator = self.store.is_aggregator(&validator_pubkey, epoch)?; if outcome.is_subscription_candidate() { Some(ValidatorSubscription {