diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs
index d7a8bca4d..019e87309 100644
--- a/beacon_node/beacon_chain/src/attestation_verification.rs
+++ b/beacon_node/beacon_chain/src/attestation_verification.rs
@@ -539,8 +539,8 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> {
             Err(e) => return Err(SignatureNotChecked(&signed_aggregate.message.aggregate, e)),
         };

-        let indexed_attestation =
-            match map_attestation_committee(chain, attestation, |(committee, _)| {
+        let get_indexed_attestation_with_committee =
+            |(committee, _): (BeaconCommittee, CommitteesPerSlot)| {
                 // Note: this clones the signature which is known to be a relatively slow operation.
                 //
                 // Future optimizations should remove this clone.
@@ -561,11 +561,17 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> {

                 get_indexed_attestation(committee.committee, attestation)
                     .map_err(|e| BeaconChainError::from(e).into())
-            }) {
-                Ok(indexed_attestation) => indexed_attestation,
-                Err(e) => return Err(SignatureNotChecked(&signed_aggregate.message.aggregate, e)),
             };

+        let indexed_attestation = match map_attestation_committee(
+            chain,
+            attestation,
+            get_indexed_attestation_with_committee,
+        ) {
+            Ok(indexed_attestation) => indexed_attestation,
+            Err(e) => return Err(SignatureNotChecked(&signed_aggregate.message.aggregate, e)),
+        };
+
         Ok(IndexedAggregatedAttestation {
             signed_aggregate,
             indexed_attestation,
diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs
index 8b6c6b374..563c29659 100644
--- a/beacon_node/beacon_chain/src/eth1_chain.rs
+++ b/beacon_node/beacon_chain/src/eth1_chain.rs
@@ -967,7 +967,7 @@ mod test {
         let spec = &E::default_spec();
         let state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), spec);

-        let blocks = vec![];
+        let blocks = [];

         assert_eq!(
             get_votes_to_consider(
diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs
index c04815ebc..39d35f811 100644
--- a/beacon_node/beacon_chain/src/state_advance_timer.rs
+++ b/beacon_node/beacon_chain/src/state_advance_timer.rs
@@ -51,7 +51,8 @@ const MAX_BLOCK_PRODUCTION_CACHE_DISTANCE: u64 = 4;
 #[derive(Debug)]
 enum Error {
     BeaconChain(BeaconChainError),
-    HeadMissingFromSnapshotCache(Hash256),
+    // We don't use the inner value directly, but it's used in the Debug impl.
+    HeadMissingFromSnapshotCache(#[allow(dead_code)] Hash256),
     MaxDistanceExceeded {
         current_slot: Slot,
         head_slot: Slot,
diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs
index 3d4ea51f4..2c5bde55e 100644
--- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs
+++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs
@@ -54,7 +54,8 @@ impl Operation {
 }

 #[derive(Debug)]
-struct Custom(String);
+// We don't use the string value directly, but it's used in the Debug impl which is required by `warp::reject::Reject`.
+struct Custom(#[allow(dead_code)] String);

 impl warp::reject::Reject for Custom {}

diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs
index f0be51114..b50949470 100644
--- a/beacon_node/execution_layer/src/test_utils/mod.rs
+++ b/beacon_node/execution_layer/src/test_utils/mod.rs
@@ -599,8 +599,8 @@ async fn handle_rejection(err: Rejection) -> Result<impl warp::Reply, Infallible
     let code;
     let message;

-    if let Some(e) = err.find::<AuthError>() {
-        message = format!("Authorization error: {:?}", e);
+    if let Some(AuthError(e)) = err.find::<AuthError>() {
+        message = format!("Authorization error: {}", e);
         code = StatusCode::UNAUTHORIZED;
     } else {
         message = "BAD_REQUEST".to_string();
diff --git a/beacon_node/http_api/src/attestation_performance.rs b/beacon_node/http_api/src/attestation_performance.rs
index 3e7d8d5e3..6e3ebccce 100644
--- a/beacon_node/http_api/src/attestation_performance.rs
+++ b/beacon_node/http_api/src/attestation_performance.rs
@@ -14,11 +14,12 @@ const MAX_REQUEST_RANGE_EPOCHS: usize = 100;
 const BLOCK_ROOT_CHUNK_SIZE: usize = 100;

 #[derive(Debug)]
+// We don't use the inner values directly, but they're used in the Debug impl.
 enum AttestationPerformanceError {
-    BlockReplay(BlockReplayError),
-    BeaconState(BeaconStateError),
-    ParticipationCache(ParticipationCacheError),
-    UnableToFindValidator(usize),
+    BlockReplay(#[allow(dead_code)] BlockReplayError),
+    BeaconState(#[allow(dead_code)] BeaconStateError),
+    ParticipationCache(#[allow(dead_code)] ParticipationCacheError),
+    UnableToFindValidator(#[allow(dead_code)] usize),
 }

 impl From<BlockReplayError> for AttestationPerformanceError {
diff --git a/beacon_node/http_api/src/block_packing_efficiency.rs b/beacon_node/http_api/src/block_packing_efficiency.rs
index e099e130a..c73dcb7e0 100644
--- a/beacon_node/http_api/src/block_packing_efficiency.rs
+++ b/beacon_node/http_api/src/block_packing_efficiency.rs
@@ -19,10 +19,11 @@ use warp_utils::reject::{beacon_chain_error, custom_bad_request, custom_server_e
 const BLOCK_ROOT_CHUNK_SIZE: usize = 100;

 #[derive(Debug)]
+// We don't use the inner values directly, but they're used in the Debug impl.
 enum PackingEfficiencyError {
-    BlockReplay(BlockReplayError),
-    BeaconState(BeaconStateError),
-    CommitteeStoreError(Slot),
+    BlockReplay(#[allow(dead_code)] BlockReplayError),
+    BeaconState(#[allow(dead_code)] BeaconStateError),
+    CommitteeStoreError(#[allow(dead_code)] Slot),
     InvalidAttestationError,
 }

diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs
index fe01f3c52..f89bc3c29 100644
--- a/beacon_node/http_api/src/lib.rs
+++ b/beacon_node/http_api/src/lib.rs
@@ -1019,7 +1019,7 @@ pub fn serve<T: BeaconChainTypes>(
                 Ok((
                     state
                         .get_built_sync_committee(epoch, &chain.spec)
-                        .map(|committee| committee.clone())
+                        .cloned()
                         .map_err(|e| match e {
                             BeaconStateError::SyncCommitteeNotKnown { .. } => {
                                 warp_utils::reject::custom_bad_request(format!(
@@ -2858,7 +2858,7 @@ pub fn serve<T: BeaconChainTypes>(
                         hex::encode(
                             meta_data
                                 .syncnets()
-                                .map(|x| x.clone())
+                                .cloned()
                                 .unwrap_or_default()
                                 .into_bytes()
                             )
diff --git a/beacon_node/lighthouse_network/src/gossipsub/behaviour/tests.rs b/beacon_node/lighthouse_network/src/gossipsub/behaviour/tests.rs
index 4e02e4016..9d8a10bcc 100644
--- a/beacon_node/lighthouse_network/src/gossipsub/behaviour/tests.rs
+++ b/beacon_node/lighthouse_network/src/gossipsub/behaviour/tests.rs
@@ -174,7 +174,7 @@ fn inject_nodes1() -> InjectNodes<IdentityTransform, AllowAllSubscriptionFilter>

 fn add_peer<D, F>(
     gs: &mut Behaviour<D, F>,
-    topic_hashes: &Vec<TopicHash>,
+    topic_hashes: &[TopicHash],
     outbound: bool,
     explicit: bool,
 ) -> (PeerId, RpcReceiver)
@@ -187,7 +187,7 @@ where
 fn add_peer_with_addr<D, F>(
     gs: &mut Behaviour<D, F>,
-    topic_hashes: &Vec<TopicHash>,
+    topic_hashes: &[TopicHash],
     outbound: bool,
     explicit: bool,
     address: Multiaddr,
 ) -> (PeerId, RpcReceiver)
@@ -208,7 +208,7 @@ where

 fn add_peer_with_addr_and_kind<D, F>(
     gs: &mut Behaviour<D, F>,
-    topic_hashes: &Vec<TopicHash>,
+    topic_hashes: &[TopicHash],
     outbound: bool,
     explicit: bool,
     address: Multiaddr,
@@ -3218,7 +3218,7 @@ fn test_scoring_p1() {
     );
 }

-fn random_message(seq: &mut u64, topics: &Vec<TopicHash>) -> RawMessage {
+fn random_message(seq: &mut u64, topics: &[TopicHash]) -> RawMessage {
     let mut rng = rand::thread_rng();
     *seq += 1;
     RawMessage {
@@ -4080,20 +4080,20 @@ fn test_scoring_p6() {
     //create 5 peers with the same ip
     let addr = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 3));
     let peers = vec![
-        add_peer_with_addr(&mut gs, &vec![], false, false, addr.clone()).0,
-        add_peer_with_addr(&mut gs, &vec![], false, false, addr.clone()).0,
-        add_peer_with_addr(&mut gs, &vec![], true, false, addr.clone()).0,
-        add_peer_with_addr(&mut gs, &vec![], true, false, addr.clone()).0,
-        add_peer_with_addr(&mut gs, &vec![], true, true, addr.clone()).0,
+        add_peer_with_addr(&mut gs, &[], false, false, addr.clone()).0,
+        add_peer_with_addr(&mut gs, &[], false, false, addr.clone()).0,
+        add_peer_with_addr(&mut gs, &[], true, false, addr.clone()).0,
+        add_peer_with_addr(&mut gs, &[], true, false, addr.clone()).0,
+        add_peer_with_addr(&mut gs, &[], true, true, addr.clone()).0,
     ];

     //create 4 other peers with other ip
     let addr2 = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 4));
     let others = vec![
-        add_peer_with_addr(&mut gs, &vec![], false, false, addr2.clone()).0,
-        add_peer_with_addr(&mut gs, &vec![], false, false, addr2.clone()).0,
-        add_peer_with_addr(&mut gs, &vec![], true, false, addr2.clone()).0,
-        add_peer_with_addr(&mut gs, &vec![], true, false, addr2.clone()).0,
+        add_peer_with_addr(&mut gs, &[], false, false, addr2.clone()).0,
+        add_peer_with_addr(&mut gs, &[], false, false, addr2.clone()).0,
+        add_peer_with_addr(&mut gs, &[], true, false, addr2.clone()).0,
+        add_peer_with_addr(&mut gs, &[], true, false, addr2.clone()).0,
     ];

     //no penalties yet
diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs
index ea0318ca9..af4824467 100644
--- a/beacon_node/lighthouse_network/tests/common.rs
+++ b/beacon_node/lighthouse_network/tests/common.rs
@@ -42,7 +42,12 @@ pub fn fork_context(fork_name: ForkName) -> ForkContext {
     ForkContext::new::<E>(current_slot, Hash256::zero(), &chain_spec)
 }

-pub struct Libp2pInstance(LibP2PService<ReqId, E>, exit_future::Signal);
+pub struct Libp2pInstance(
+    LibP2PService<ReqId, E>,
+    #[allow(dead_code)]
+    // This field is managed for lifetime purposes and may not be used directly, hence the `#[allow(dead_code)]` attribute.
+    exit_future::Signal,
+);

 impl std::ops::Deref for Libp2pInstance {
     type Target = LibP2PService<ReqId, E>;
diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs
index 769775a62..658c851ba 100644
--- a/beacon_node/network/src/subnet_service/tests/mod.rs
+++ b/beacon_node/network/src/subnet_service/tests/mod.rs
@@ -253,7 +253,7 @@ mod attestation_service {
             &attestation_service.beacon_chain.spec,
         )
         .unwrap();
-        let expected = vec![
+        let expected = [
             SubnetServiceMessage::Subscribe(Subnet::Attestation(subnet_id)),
             SubnetServiceMessage::Unsubscribe(Subnet::Attestation(subnet_id)),
         ];
diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs
index 63cd8e67d..49bc75393 100644
--- a/beacon_node/store/src/hot_cold_store.rs
+++ b/beacon_node/store/src/hot_cold_store.rs
@@ -1344,7 +1344,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
         high_restore_point
             .get_block_root(slot)
             .or_else(|_| high_restore_point.get_oldest_block_root())
-            .map(|x| *x)
+            .copied()
             .map_err(HotColdDBError::RestorePointBlockHashError)
     }

diff --git a/common/validator_dir/src/builder.rs b/common/validator_dir/src/builder.rs
index bccf9086a..3d5d14960 100644
--- a/common/validator_dir/src/builder.rs
+++ b/common/validator_dir/src/builder.rs
@@ -214,6 +214,7 @@ impl<'a> Builder<'a> {
                 .write(true)
                 .read(true)
                 .create(true)
+                .truncate(true)
                 .open(path)
                 .map_err(Error::UnableToSaveDepositData)?
                 .write_all(hex.as_bytes())
@@ -231,6 +232,7 @@ impl<'a> Builder<'a> {
                 .write(true)
                 .read(true)
                 .create(true)
+                .truncate(true)
                 .open(path)
                 .map_err(Error::UnableToSaveDepositAmount)?
                 .write_all(format!("{}", amount).as_bytes())
diff --git a/consensus/cached_tree_hash/src/impls.rs b/consensus/cached_tree_hash/src/impls.rs
index 0624bd201..efdba32b5 100644
--- a/consensus/cached_tree_hash/src/impls.rs
+++ b/consensus/cached_tree_hash/src/impls.rs
@@ -26,13 +26,11 @@ pub fn u64_leaf_count(len: usize) -> usize {

 pub fn hash256_iter(
     values: &[Hash256],
-) -> impl Iterator<Item = [u8; BYTES_PER_CHUNK]> + ExactSizeIterator + '_ {
+) -> impl ExactSizeIterator<Item = [u8; BYTES_PER_CHUNK]> + '_ {
     values.iter().copied().map(Hash256::to_fixed_bytes)
 }

-pub fn u64_iter(
-    values: &[u64],
-) -> impl Iterator<Item = [u8; BYTES_PER_CHUNK]> + ExactSizeIterator + '_ {
+pub fn u64_iter(values: &[u64]) -> impl ExactSizeIterator<Item = [u8; BYTES_PER_CHUNK]> + '_ {
     let type_size = size_of::<u64>();
     let vals_per_chunk = BYTES_PER_CHUNK / type_size;
     values.chunks(vals_per_chunk).map(move |xs| {
diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs
index bdc3be9ec..4c0ee1bfa 100644
--- a/consensus/types/src/beacon_state.rs
+++ b/consensus/types/src/beacon_state.rs
@@ -641,7 +641,7 @@ impl<T: EthSpec> BeaconState<T> {
         if self.slot() <= decision_slot {
             Ok(block_root)
         } else {
-            self.get_block_root(decision_slot).map(|root| *root)
+            self.get_block_root(decision_slot).copied()
         }
     }

@@ -657,7 +657,7 @@ impl<T: EthSpec> BeaconState<T> {
         if self.slot() == decision_slot {
             Ok(block_root)
         } else {
-            self.get_block_root(decision_slot).map(|root| *root)
+            self.get_block_root(decision_slot).copied()
         }
     }

@@ -683,7 +683,7 @@ impl<T: EthSpec> BeaconState<T> {
         if self.slot() == decision_slot {
             Ok(block_root)
         } else {
-            self.get_block_root(decision_slot).map(|root| *root)
+            self.get_block_root(decision_slot).copied()
         }
     }

diff --git a/consensus/types/src/historical_summary.rs b/consensus/types/src/historical_summary.rs
index dcc387d3d..d212f8a5e 100644
--- a/consensus/types/src/historical_summary.rs
+++ b/consensus/types/src/historical_summary.rs
@@ -81,7 +81,7 @@ impl<'a, N: Unsigned> CachedTreeHash<TreeHashCache> for HistoricalSummaryCache<'

 pub fn leaf_iter(
     values: &[HistoricalSummary],
-) -> impl Iterator<Item = [u8; BYTES_PER_CHUNK]> + ExactSizeIterator + '_ {
+) -> impl ExactSizeIterator<Item = [u8; BYTES_PER_CHUNK]> + '_ {
     values
         .iter()
         .map(|value| value.tree_hash_root())
diff --git a/consensus/types/src/participation_list.rs b/consensus/types/src/participation_list.rs
index be119fbef..6e3d916de 100644
--- a/consensus/types/src/participation_list.rs
+++ b/consensus/types/src/participation_list.rs
@@ -43,7 +43,7 @@ pub fn leaf_count(len: usize) -> usize {

 pub fn leaf_iter(
     values: &[ParticipationFlags],
-) -> impl Iterator<Item = [u8; BYTES_PER_CHUNK]> + ExactSizeIterator + '_ {
+) -> impl ExactSizeIterator<Item = [u8; BYTES_PER_CHUNK]> + '_ {
     values.chunks(BYTES_PER_CHUNK).map(|xs| {
         // Zero-pad chunks on the right.
         let mut chunk = [0u8; BYTES_PER_CHUNK];
diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs
index 95af4d638..87a1a8bd1 100644
--- a/database_manager/src/lib.rs
+++ b/database_manager/src/lib.rs
@@ -356,6 +356,7 @@ pub fn inspect_db(

     let write_result = fs::OpenOptions::new()
         .create(true)
+        .truncate(true)
         .write(true)
         .open(&file_path)
         .map_err(|e| format!("Failed to open file: {:?}", e))
diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs
index a1f6b26a9..40001f1e1 100644
--- a/lighthouse/environment/src/lib.rs
+++ b/lighthouse/environment/src/lib.rs
@@ -434,7 +434,7 @@ impl<E: EthSpec> Environment<E> {
             async move { rx.next().await.ok_or("Internal shutdown channel exhausted") };
         futures::pin_mut!(inner_shutdown);

-        match self.runtime().block_on(async {
+        let register_handlers = async {
             let mut handles = vec![];

             // setup for handling SIGTERM
@@ -465,7 +465,9 @@ impl<E: EthSpec> Environment<E> {
             }

             future::select(inner_shutdown, future::select_all(handles.into_iter())).await
-        }) {
+        };
+
+        match self.runtime().block_on(register_handlers) {
             future::Either::Left((Ok(reason), _)) => {
                 info!(self.log, "Internal shutdown received"; "reason" => reason.message());
                 Ok(reason)
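
Note on the `OpenOptions` hunks in `common/validator_dir/src/builder.rs` and `database_manager/src/lib.rs`: `create(true)` only creates a file if it is missing; it does not clear an existing one, so rewriting a shorter value can leave stale trailing bytes behind (likely the pattern flagged by clippy's `suspicious_open_options` lint). The following standalone sketch is not Lighthouse code (the file name is made up); it just illustrates the difference `truncate(true)` makes:

    use std::fs::OpenOptions;
    use std::io::Write;

    fn main() -> std::io::Result<()> {
        let path = "truncate_demo.txt";

        // Seed the file with a long value.
        std::fs::write(path, "0123456789")?;

        // `create(true)` + `write(true)` without `truncate(true)` overwrites in
        // place from offset zero, so a shorter payload leaves stale bytes behind.
        OpenOptions::new()
            .write(true)
            .create(true)
            .open(path)?
            .write_all(b"abcde")?;
        assert_eq!(std::fs::read_to_string(path)?, "abcde56789");

        // With `truncate(true)` the file is emptied on open, so only the new
        // payload remains.
        OpenOptions::new()
            .write(true)
            .create(true)
            .truncate(true)
            .open(path)?
            .write_all(b"abcde")?;
        assert_eq!(std::fs::read_to_string(path)?, "abcde");

        std::fs::remove_file(path)
    }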
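Likewise, the `#[allow(dead_code)]` field annotations work around rustc's stricter dead-code analysis: a field that is never read directly is reported as unused even when it still appears in derived `Debug` output. A minimal sketch (the `FetchError` enum is invented for illustration; `Custom` mirrors the tuple struct in `mock_builder.rs`):

    // Without the allow, recent rustc versions warn that the field is never read:
    // the derived `Debug` implementation does not count as a direct read.
    #[derive(Debug)]
    struct Custom(#[allow(dead_code)] String);

    #[derive(Debug)]
    enum FetchError {
        // The inner value is only ever observed through `{:?}` formatting.
        Timeout(#[allow(dead_code)] u64),
        NotFound,
    }

    fn main() {
        let rejection = Custom("bad header".to_string());
        let error = FetchError::Timeout(30);
        // The values are still visible when the types are formatted for debugging.
        println!("{rejection:?} {error:?}");
        let _ = FetchError::NotFound;
    }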