1.57.0 lints (#2850)
## Issue Addressed

New Rust 1.57.0 lints.

## Proposed Changes

- Box some large enum variants.
- Remove some unused fields (is the validator lockfile unused? It seemed so to me).

## Additional Info

- Some error fields were marked as dead code but are actually logged in some places, so they were kept (renamed with an underscore prefix).
- Left some dead fields in our EF test code because I assume they are useful for debugging.

Co-authored-by: realbigsean <seananderson33@gmail.com>
Parent: f3c237cfa0 · Commit: a80ccc3a33
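The dominant pattern in this diff is boxing large enum variants. An enum is as big as its largest variant, so one oversized payload (here, `Attestation<T>`) inflates every value of the type that gets moved, cloned, or queued. The sketch below is illustrative only, with stand-in types rather than code from this PR, and shows the shape of the fix suggested by clippy's `large_enum_variant` lint:

```rust
// Stand-in types, not from this PR: an enum is sized to its largest
// variant, so a big inline payload inflates every value of the type.
enum Event {
    // Boxed: the variant stores a pointer; the payload lives on the heap.
    Attestation(Box<[u8; 2048]>),
    FinalizedCheckpoint(u64),
}

fn main() {
    let e = Event::Attestation(Box::new([0u8; 2048]));
    // Roughly pointer-sized plus a discriminant, instead of >2 KiB.
    println!("size_of::<Event>() = {}", std::mem::size_of::<Event>());
    match &e {
        Event::Attestation(bytes) => println!("attestation: {} bytes boxed", bytes.len()),
        Event::FinalizedCheckpoint(epoch) => println!("finalized at epoch {}", epoch),
    }
}
```

The trade-off is one heap allocation per boxed value, which is cheap next to moving kilobytes through channels and event handlers.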
```diff
@@ -1602,7 +1602,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         // This method is called for API and gossip attestations, so this covers all unaggregated attestation events
         if let Some(event_handler) = self.event_handler.as_ref() {
             if event_handler.has_attestation_subscribers() {
-                event_handler.register(EventKind::Attestation(v.attestation().clone()));
+                event_handler
+                    .register(EventKind::Attestation(Box::new(v.attestation().clone())));
             }
         }
         metrics::inc_counter(&metrics::UNAGGREGATED_ATTESTATION_PROCESSING_SUCCESSES);
```
```diff
@@ -1638,7 +1639,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         // This method is called for API and gossip attestations, so this covers all aggregated attestation events
         if let Some(event_handler) = self.event_handler.as_ref() {
             if event_handler.has_attestation_subscribers() {
-                event_handler.register(EventKind::Attestation(v.attestation().clone()));
+                event_handler
+                    .register(EventKind::Attestation(Box::new(v.attestation().clone())));
             }
         }
         metrics::inc_counter(&metrics::AGGREGATED_ATTESTATION_PROCESSING_SUCCESSES);
```
```diff
@@ -41,9 +41,17 @@ const MAX_ADVANCE_DISTANCE: u64 = 4;
 enum Error {
     BeaconChain(BeaconChainError),
     HeadMissingFromSnapshotCache(Hash256),
-    MaxDistanceExceeded { current_slot: Slot, head_slot: Slot },
-    StateAlreadyAdvanced { block_root: Hash256 },
-    BadStateSlot { state_slot: Slot, block_slot: Slot },
+    MaxDistanceExceeded {
+        current_slot: Slot,
+        head_slot: Slot,
+    },
+    StateAlreadyAdvanced {
+        block_root: Hash256,
+    },
+    BadStateSlot {
+        _state_slot: Slot,
+        _block_slot: Slot,
+    },
 }

 impl From<BeaconChainError> for Error {
```
```diff
@@ -224,8 +232,8 @@ fn advance_head<T: BeaconChainTypes>(
         // Advancing more than one slot without storing the intermediate state would corrupt the
         // database. Future works might store temporary, intermediate states inside this function.
         return Err(Error::BadStateSlot {
-            block_slot: head_slot,
-            state_slot: state.slot(),
+            _block_slot: head_slot,
+            _state_slot: state.slot(),
         });
     };

```
```diff
@@ -243,8 +243,8 @@ enum Error {
     /// The file read from disk does not have a contiguous list of validator public keys. The file
     /// has become corrupted.
     InconsistentIndex {
-        expected: Option<usize>,
-        found: usize,
+        _expected: Option<usize>,
+        _found: usize,
     },
 }
```
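This is the pattern used throughout the PR for fields that rustc 1.57's stricter `dead_code` lint flags as never read: prefixing the name with an underscore silences the lint while the field still appears in `derive(Debug)` output when the error is logged. A minimal, self-contained sketch with a hypothetical error type:

```rust
#[derive(Debug)]
enum CacheError {
    // Never read by name, only printed via Debug, so the names carry
    // an underscore prefix to satisfy the stricter dead_code lint.
    InconsistentIndex {
        _expected: Option<usize>,
        _found: usize,
    },
}

fn main() {
    let e = CacheError::InconsistentIndex {
        _expected: Some(3),
        _found: 7,
    };
    // The fields still show up in the logged output.
    println!("pubkey cache error: {:?}", e);
}
```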
```diff
@@ -296,8 +296,8 @@ impl ValidatorPubkeyCacheFile {
                 indices.insert(pubkey, index);
             } else {
                 return Err(Error::InconsistentIndex {
-                    expected,
-                    found: index,
+                    _expected: expected,
+                    _found: index,
                 });
             }
         }
```
```diff
@@ -2310,7 +2310,7 @@ impl ApiTester {
             self.attestations
                 .clone()
                 .into_iter()
-                .map(|attestation| EventKind::Attestation(attestation))
+                .map(|attestation| EventKind::Attestation(Box::new(attestation)))
                 .collect::<Vec<_>>()
                 .as_slice()
         );
```
```diff
@@ -63,7 +63,7 @@ use types::{
     SyncCommitteeMessage, SyncSubnetId,
 };
 use work_reprocessing_queue::{
-    spawn_reprocess_scheduler, QueuedAggregate, QueuedBlock, QueuedUnaggregate, ReadyWork,
+    spawn_reprocess_scheduler, QueuedAggregate, QueuedUnaggregate, ReadyWork,
 };

 use worker::{Toolbox, Worker};
@@ -72,6 +72,7 @@ mod tests;
 mod work_reprocessing_queue;
 mod worker;

+use crate::beacon_processor::work_reprocessing_queue::QueuedBlock;
 pub use worker::{GossipAggregatePackage, GossipAttestationPackage, ProcessId};

 /// The maximum size of the channel for work events to the `BeaconProcessor`.
```
```diff
@@ -574,7 +575,7 @@ impl<T: BeaconChainTypes> std::convert::From<ReadyWork<T>> for WorkEvent<T> {
                 drop_during_sync: false,
                 work: Work::DelayedImportBlock {
                     peer_id,
-                    block: Box::new(block),
+                    block,
                     seen_timestamp,
                 },
             },
```
```diff
@@ -91,7 +91,7 @@ pub struct QueuedAggregate<T: EthSpec> {
 /// A block that arrived early and has been queued for later import.
 pub struct QueuedBlock<T: BeaconChainTypes> {
     pub peer_id: PeerId,
-    pub block: GossipVerifiedBlock<T>,
+    pub block: Box<GossipVerifiedBlock<T>>,
     pub seen_timestamp: Duration,
 }
```
```diff
@@ -121,7 +121,6 @@ pub struct GossipAttestationPackage<E: EthSpec> {
     peer_id: PeerId,
     attestation: Box<Attestation<E>>,
-    subnet_id: SubnetId,
     beacon_block_root: Hash256,
     should_import: bool,
     seen_timestamp: Duration,
 }
@@ -138,7 +137,6 @@ impl<E: EthSpec> GossipAttestationPackage<E> {
         Self {
             message_id,
             peer_id,
             beacon_block_root: attestation.data.beacon_block_root,
             attestation,
-            subnet_id,
             should_import,
```
```diff
@@ -830,7 +828,7 @@ impl<T: BeaconChainTypes> Worker<T> {
         if reprocess_tx
             .try_send(ReprocessQueueMessage::EarlyBlock(QueuedBlock {
                 peer_id,
-                block: verified_block,
+                block: Box::new(verified_block),
                 seen_timestamp: seen_duration,
             }))
             .is_err()
```
```diff
@@ -226,7 +226,7 @@ pub fn get_config<E: EthSpec>(
         client_config.sync_eth1_chain = true;
         client_config.eth1.endpoints = endpoints
             .split(',')
-            .map(|s| SensitiveUrl::parse(s))
+            .map(SensitiveUrl::parse)
             .collect::<Result<_, _>>()
             .map_err(|e| format!("eth1-endpoints contains an invalid URL {:?}", e))?;
     }
```
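`|s| SensitiveUrl::parse(s)` is a closure that only forwards its argument, so clippy's `redundant_closure` lint suggests passing the function item directly. A small sketch of the same transformation, using a stand-in parse function rather than `SensitiveUrl`:

```rust
use std::num::ParseIntError;

fn parse(s: &str) -> Result<u32, ParseIntError> {
    s.parse()
}

fn main() {
    let input = "1,2,3";
    // Before: .map(|s| parse(s))  -- the closure only forwards `s`.
    // After: pass the function item itself.
    let nums = input
        .split(',')
        .map(parse)
        .collect::<Result<Vec<_>, _>>();
    println!("{:?}", nums); // Ok([1, 2, 3])
}
```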
```diff
@@ -245,7 +245,7 @@ pub fn get_config<E: EthSpec>(
         client_config.sync_eth1_chain = true;
         client_config.execution_endpoints = endpoints
             .split(',')
-            .map(|s| SensitiveUrl::parse(s))
+            .map(SensitiveUrl::parse)
             .collect::<Result<_, _>>()
             .map(Some)
             .map_err(|e| format!("execution-endpoints contains an invalid URL {:?}", e))?;
```
```diff
@@ -777,7 +777,7 @@ pub struct SseLateHead {
 #[derive(PartialEq, Debug, Serialize, Clone)]
 #[serde(bound = "T: EthSpec", untagged)]
 pub enum EventKind<T: EthSpec> {
-    Attestation(Attestation<T>),
+    Attestation(Box<Attestation<T>>),
     Block(SseBlock),
     FinalizedCheckpoint(SseFinalizedCheckpoint),
     Head(SseHead),
```
```diff
@@ -11,7 +11,7 @@ use std::path::{Path, PathBuf};
 /// outage) caused the lockfile not to be deleted.
 #[derive(Debug)]
 pub struct Lockfile {
-    file: File,
+    _file: File,
     path: PathBuf,
     file_existed: bool,
 }
```
```diff
@@ -43,7 +43,7 @@ impl Lockfile {
             _ => LockfileError::IoError(path.clone(), e),
         })?;
         Ok(Self {
-            file,
+            _file: file,
             path,
             file_existed,
         })
```
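Renaming `file` to `_file` answers the commit message's lockfile question: the field is never read, but it must stay alive, because dropping the `File` releases the lock it represents. The underscore keeps `dead_code` quiet without giving up the RAII guard. A sketch with a hypothetical guard type:

```rust
// Hypothetical guard type standing in for the real `File`.
struct Guard;

impl Drop for Guard {
    fn drop(&mut self) {
        println!("lock released");
    }
}

struct Lockfile {
    // Held only for its side effect: as long as the struct lives, so
    // does the lock. `_guard` silences dead_code; a bare `let _ = ...`
    // binding would have dropped the guard immediately instead.
    _guard: Guard,
}

fn main() {
    let lock = Lockfile { _guard: Guard };
    println!("lock held");
    drop(lock); // prints "lock released" here, not at construction
}
```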
```diff
@@ -99,7 +99,7 @@ impl<'a> AlignedRecordDecorator<'a> {

 impl<'a> Write for AlignedRecordDecorator<'a> {
     fn write(&mut self, buf: &[u8]) -> Result<usize> {
-        if buf.iter().any(|c| is_ascii_control(c)) {
+        if buf.iter().any(u8::is_ascii_control) {
             let filtered = buf
                 .iter()
                 .cloned()
```
```diff
@@ -128,7 +128,7 @@ impl MonitoringHttpClient {
             Error::BeaconMetricsFailed("Beacon metrics require db path".to_string())
         })?;

-        let freezer_db_path = self.db_path.as_ref().ok_or_else(|| {
+        let freezer_db_path = self.freezer_db_path.as_ref().ok_or_else(|| {
             Error::BeaconMetricsFailed("Beacon metrics require freezer db path".to_string())
         })?;
         let metrics =
```
```diff
@@ -63,7 +63,7 @@ pub struct Eth1DepositData {
 pub struct ValidatorDir {
     dir: PathBuf,
     #[derivative(PartialEq = "ignore")]
-    lockfile: Lockfile,
+    _lockfile: Lockfile,
 }

 impl ValidatorDir {
```
```diff
@@ -85,7 +85,10 @@ impl ValidatorDir {
         let lockfile_path = dir.join(format!("{}.lock", VOTING_KEYSTORE_FILE));
         let lockfile = Lockfile::new(lockfile_path).map_err(Error::LockfileError)?;

-        Ok(Self { dir, lockfile })
+        Ok(Self {
+            dir,
+            _lockfile: lockfile,
+        })
     }

     /// Returns the `dir` provided to `Self::open`.
```
```diff
@@ -491,8 +491,8 @@ mod tests {
             subs.push(sub);
         }

-        for mut sub in subs.iter_mut() {
-            test_routine(arena, &mut sub);
+        for sub in subs.iter_mut() {
+            test_routine(arena, sub);
         }
     }
 }
```
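`iter_mut()` already yields `&mut` references, so the `mut` binding and the extra `&mut` re-borrow were both redundant (the old code only compiled thanks to deref coercion through `&mut &mut T`). A sketch of the same fix:

```rust
fn bump(x: &mut i32) {
    *x += 1;
}

fn main() {
    let mut subs = vec![1, 2, 3];
    // Before: `for mut sub in subs.iter_mut() { bump(&mut sub); }`
    // `sub` is already `&mut i32`; the extra `mut` and `&mut` are noise.
    for sub in subs.iter_mut() {
        bump(sub);
    }
    println!("{:?}", subs); // [2, 3, 4]
}
```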
```diff
@@ -113,7 +113,7 @@ impl<'a> SszEncoder<'a> {
         F: Fn(&mut Vec<u8>),
     {
         if is_ssz_fixed_len {
-            ssz_append(&mut self.buf);
+            ssz_append(self.buf);
         } else {
             self.buf
                 .extend_from_slice(&encode_length(self.offset + self.variable_bytes.len()));
```
```diff
@@ -129,7 +129,7 @@ impl<'a> SszEncoder<'a> {
     pub fn finalize(&mut self) -> &mut Vec<u8> {
         self.buf.append(&mut self.variable_bytes);

-        &mut self.buf
+        self.buf
     }
 }

```
```diff
@@ -187,14 +187,13 @@ fn valid_4_deposits() {
     let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT);
     let mut state = harness.get_current_state();

-    let (deposits, mut state) = harness.make_deposits(&mut state, 4, None, None);
+    let (deposits, state) = harness.make_deposits(&mut state, 4, None, None);
     let deposits = VariableList::from(deposits);

     let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0;
     *head_block.to_mut().body_mut().deposits_mut() = deposits;

-    let result =
-        process_operations::process_deposits(&mut state, head_block.body().deposits(), &spec);
+    let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec);

     // Expecting Ok because these are valid deposits.
     assert_eq!(result, Ok(()));
```
```diff
@@ -206,7 +205,7 @@ fn invalid_deposit_deposit_count_too_big() {
     let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT);
     let mut state = harness.get_current_state();

-    let (deposits, mut state) = harness.make_deposits(&mut state, 1, None, None);
+    let (deposits, state) = harness.make_deposits(&mut state, 1, None, None);
     let deposits = VariableList::from(deposits);

     let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0;
```
```diff
@@ -214,8 +213,7 @@ fn invalid_deposit_deposit_count_too_big() {

     let big_deposit_count = NUM_DEPOSITS + 1;
     state.eth1_data_mut().deposit_count = big_deposit_count;
-    let result =
-        process_operations::process_deposits(&mut state, head_block.body().deposits(), &spec);
+    let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec);

     // Expecting DepositCountInvalid because we incremented the deposit_count
     assert_eq!(
```
```diff
@@ -233,7 +231,7 @@ fn invalid_deposit_count_too_small() {
     let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT);
     let mut state = harness.get_current_state();

-    let (deposits, mut state) = harness.make_deposits(&mut state, 1, None, None);
+    let (deposits, state) = harness.make_deposits(&mut state, 1, None, None);
     let deposits = VariableList::from(deposits);

     let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0;
```
```diff
@@ -241,8 +239,7 @@ fn invalid_deposit_count_too_small() {

     let small_deposit_count = NUM_DEPOSITS - 1;
     state.eth1_data_mut().deposit_count = small_deposit_count;
-    let result =
-        process_operations::process_deposits(&mut state, head_block.body().deposits(), &spec);
+    let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec);

     // Expecting DepositCountInvalid because we decremented the deposit_count
     assert_eq!(
```
```diff
@@ -260,7 +257,7 @@ fn invalid_deposit_bad_merkle_proof() {
     let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT);
     let mut state = harness.get_current_state();

-    let (deposits, mut state) = harness.make_deposits(&mut state, 1, None, None);
+    let (deposits, state) = harness.make_deposits(&mut state, 1, None, None);
     let deposits = VariableList::from(deposits);

     let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0;
```
```diff
@@ -270,8 +267,7 @@ fn invalid_deposit_bad_merkle_proof() {
     // Manually offsetting deposit count and index to trigger bad merkle proof
     state.eth1_data_mut().deposit_count += 1;
     *state.eth1_deposit_index_mut() += 1;
-    let result =
-        process_operations::process_deposits(&mut state, head_block.body().deposits(), &spec);
+    let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec);

     // Expecting BadMerkleProof because the proofs were created with different indices
     assert_eq!(
```
```diff
@@ -289,15 +285,14 @@ fn invalid_deposit_wrong_sig() {
     let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT);
     let mut state = harness.get_current_state();

-    let (deposits, mut state) =
+    let (deposits, state) =
         harness.make_deposits(&mut state, 1, None, Some(SignatureBytes::empty()));
     let deposits = VariableList::from(deposits);

     let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0;
     *head_block.to_mut().body_mut().deposits_mut() = deposits;

-    let result =
-        process_operations::process_deposits(&mut state, head_block.body().deposits(), &spec);
+    let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec);
     // Expecting Ok(()) even though the block signature does not correspond to the correct public key
     assert_eq!(result, Ok(()));
 }
```
```diff
@@ -308,15 +303,14 @@ fn invalid_deposit_invalid_pub_key() {
     let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT);
     let mut state = harness.get_current_state();

-    let (deposits, mut state) =
+    let (deposits, state) =
         harness.make_deposits(&mut state, 1, Some(PublicKeyBytes::empty()), None);
     let deposits = VariableList::from(deposits);

     let mut head_block = harness.chain.head_beacon_block().unwrap().deconstruct().0;
     *head_block.to_mut().body_mut().deposits_mut() = deposits;

-    let result =
-        process_operations::process_deposits(&mut state, head_block.body().deposits(), &spec);
+    let result = process_operations::process_deposits(state, head_block.body().deposits(), &spec);

     // Expecting Ok(()) even though we passed in invalid publickeybytes in the public key field of the deposit data.
     assert_eq!(result, Ok(()));
```
```diff
@@ -62,7 +62,8 @@ pub enum Step<B, A, P> {
 #[derive(Debug, Clone, Deserialize)]
 #[serde(deny_unknown_fields)]
 pub struct Meta {
-    description: String,
+    #[serde(rename(deserialize = "description"))]
+    _description: String,
 }

 #[derive(Debug)]
```
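For the EF test metadata, the field is only deserialized for debugging and never read by name, so it gets the underscore prefix, while `#[serde(rename(deserialize = "description"))]` keeps it mapped to the original key on disk. A sketch assuming `serde` and `serde_json` as dependencies (the real tests read YAML):

```rust
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct Meta {
    // Still populated from the "description" key, but the underscore
    // prefix tells the dead_code lint the field is intentionally unread.
    #[serde(rename(deserialize = "description"))]
    _description: String,
}

fn main() {
    let meta: Meta = serde_json::from_str(r#"{"description": "a test case"}"#).unwrap();
    // Kept purely so it shows up in debug output.
    println!("{:?}", meta);
}
```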
```diff
@@ -7,7 +7,8 @@ use types::{BeaconState, EthSpec, ForkName};

 #[derive(Debug, Clone, Deserialize)]
 pub struct Metadata {
-    description: String,
+    #[serde(rename(deserialize = "description"))]
+    _description: String,
 }

 #[derive(Debug, Clone, Deserialize)]
```
```diff
@@ -15,7 +15,8 @@ use types::{BitList, BitVector, FixedVector, ForkName, VariableList};
 #[derive(Debug, Clone, Deserialize)]
 struct Metadata {
     root: String,
-    signing_root: Option<String>,
+    #[serde(rename(deserialize = "signing_root"))]
+    _signing_root: Option<String>,
 }

 #[derive(Debug, Clone)]
```
```diff
@@ -10,7 +10,8 @@ use types::{BeaconBlock, BeaconState, ForkName, Hash256, SignedBeaconBlock};
 #[derive(Debug, Clone, Deserialize)]
 struct SszStaticRoots {
     root: String,
-    signing_root: Option<String>,
+    #[serde(rename(deserialize = "signing_root"))]
+    _signing_root: Option<String>,
 }

 /// Runner for types that implement `ssz::Decode`.
```
```diff
@@ -131,7 +131,7 @@ impl Config {
         if let Some(beacon_nodes) = parse_optional::<String>(cli_args, "beacon-nodes")? {
             config.beacon_nodes = beacon_nodes
                 .split(',')
-                .map(|s| SensitiveUrl::parse(s))
+                .map(SensitiveUrl::parse)
                 .collect::<Result<_, _>>()
                 .map_err(|e| format!("Unable to parse beacon node URL: {:?}", e))?;
         }
```
```diff
@@ -84,7 +84,6 @@ pub struct ProductionValidatorClient<T: EthSpec> {
     doppelganger_service: Option<Arc<DoppelgangerService>>,
     validator_store: Arc<ValidatorStore<SystemTimeSlotClock, T>>,
-    http_api_listen_addr: Option<SocketAddr>,
     http_metrics_ctx: Option<Arc<http_metrics::Context<T>>>,
     config: Config,
 }
@@ -431,7 +430,6 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
             validator_store,
             config,
-            http_api_listen_addr: None,
             http_metrics_ctx,
         })
     }
```